| Column | Type | Values |
| --- | --- | --- |
| `code` | string | lengths 2.5k – 6.36M |
| `kind` | string | 2 classes |
| `parsed_code` | string | lengths 0 – 404k |
| `quality_prob` | float64 | 0 – 0.98 |
| `learning_prob` | float64 | 0.03 – 1 |
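Each record below carries a raw notebook (`code`), its source (`kind`), the extracted code (`parsed_code`), and two classifier scores. As a minimal sketch of filtering such a dump by those scores with pandas (the file name `rows.jsonl` and the 0.5 thresholds are assumptions, not part of the dataset):

```
import pandas as pd

# assumed local JSON-lines export of the dataset; the path is illustrative
df = pd.read_json("rows.jsonl", lines=True)

# keep rows that the classifiers score as both high quality and educational
keep = df[(df["quality_prob"] > 0.5) & (df["learning_prob"] > 0.5)]
print(keep[["kind", "quality_prob", "learning_prob"]].head())
```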
# Spark_practice

```
import os
import socket
import sys

from pyspark import SparkConf, SparkContext, SQLContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, length, when, col
from pyspark.sql.types import BooleanType, IntegerType, LongType, StringType, ArrayType, FloatType, StructType, StructField
import pyspark.sql.functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.functions import PandasUDFType
from jinja2 import Environment, FileSystemLoader
import pandas as pd

os.environ["PYSPARK_PYTHON"] = "/opt/conda/bin/python"

# setting constants
SPARK_ADDRESS = "###############"
APP_NAME = "Spark_practice"
NORMALIZED_APP_NAME = APP_NAME.replace('/', '_').replace(':', '_')
APPS_TMP_DIR = os.path.join(os.getcwd(), "tmp")
APPS_CONF_DIR = os.path.join(os.getcwd(), "conf")
APPS_LOGS_DIR = os.path.join(os.getcwd(), "logs")
LOG4J_PROP_FILE = os.path.join(APPS_CONF_DIR, "pyspark-log4j-{}.properties".format(NORMALIZED_APP_NAME))
LOG_FILE = os.path.join(APPS_LOGS_DIR, 'pyspark-{}.log'.format(NORMALIZED_APP_NAME))
EXTRA_JAVA_OPTIONS = "-Dlog4j.configuration=file://{} -Dspark.hadoop.dfs.replication=1"\
    .format(LOG4J_PROP_FILE)

LOCAL_IP = socket.gethostbyname(socket.gethostname())

# stop a previous Spark session if one is still running
try:
    spark.stop()
except NameError:
    pass

# preparing configuration files from templates
for directory in [APPS_CONF_DIR, APPS_LOGS_DIR, APPS_TMP_DIR]:
    if not os.path.exists(directory):
        os.makedirs(directory)

env = Environment(loader=FileSystemLoader('/opt'))
template = env.get_template("pyspark_log4j.properties.template")
template\
    .stream(logfile=LOG_FILE)\
    .dump(LOG4J_PROP_FILE)

# run spark
spark = SparkSession\
    .builder\
    .appName(APP_NAME)\
    .master(SPARK_ADDRESS)\
    .config('spark.ui.port', "####")\
    .config("spark.driver.host", LOCAL_IP)\
    .config("spark.memory.fraction", "0.8")\
    .config("spark.memory.storageFraction", "0.6")\
    .config("spark.executor.instances", "5")\
    .config("spark.executor.cores", '4')\
    .config("spark.executor.memory", "6g")\
    .config("spark.driver.memory", "4g")\
    .config("spark.driver.extraJavaOptions", EXTRA_JAVA_OPTIONS)\
    .config("spark.kubernetes.namespace", "#########")\
    .config("spark.kubernetes.driver.label.appname", APP_NAME)\
    .config("spark.kubernetes.executor.label.appname", APP_NAME)\
    .config("spark.kubernetes.container.image.pullPolicy", "Always")\
    .config("spark.kubernetes.container.image", "node##.st:###/spark-executor:########")\
    .config("spark.kubernetes.executor.deleteOnTermination", "true")\
    .config("spark.local.dir", "/tmp/spark")\
    .getOrCreate()

posts_df = spark.read.json("hdfs:///shared/bigdata20/posts_api.json")
posts_likes_df = spark.read.parquet("hdfs:///shared/bigdata20/posts_likes.parquet")
followers_df = spark.read.parquet("hdfs:///shared/bigdata20/followers.parquet")
followers_posts_df = spark.read.json("hdfs:///shared/bigdata20/followers_posts_api_final.json")
followers_posts_likes_df = spark.read.parquet("hdfs:///shared/bigdata20/followers_posts_likes.parquet")

posts_df.printSchema()
```

## Top 20 posts in the group by likes

```
likes = posts_df.select('id', col('likes.count').alias('likes'))\
    .orderBy('likes', ascending=False)\
    .limit(20)
likes.show()
```

## Top 20 posts in the group by comments

```
comments = posts_df.select('id', col('comments.count').alias('comments'))\
    .orderBy('comments', ascending=False)\
    .limit(20)
comments.show()
```

## Top 20 posts in the group by reposts

```
reposts = posts_df.select('id', col('reposts.count').alias('reposts'))\
    .orderBy('reposts', ascending=False)\
    .limit(20)
reposts.show()
```

## Top 20 users by likes

```
users_likes = posts_likes_df.groupby('likerId')\
    .agg(F.count('itemId').name('likes'))\
    .orderBy('likes', ascending=False)\
    .limit(20)
users_likes.show()
```

## Top 20 users by reposts

```
users_reposts = posts_df.where(col('copy_history.owner_id').getItem(0) > 0)\
    .groupby(col('copy_history.owner_id').name('user_id'))\
    .agg(F.count(col('copy_history')).name('reposts_count'))\
    .orderBy('reposts_count', ascending=False)\
    .limit(20)
users_reposts.show()
```

## Reposts of the ITMO group's original posts found in followers' posts

```
itmo_posts = followers_posts_df\
    .select('owner_id', col('copy_history')['id'].getItem(0).name('original_post_id'))\
    .where(col('copy_history')['owner_id'].getItem(0) == -94)\
    .groupby('original_post_id')\
    .agg(F.collect_list('owner_id').name('user_ids'))\
    .limit(10)
itmo_posts.show()
```

## Emoticons in posts

```
from pyspark.sql.types import ArrayType, StringType

!{sys.executable} -m pip install --user --trusted-host pypi-registry.supplementary-services.svc.cluster.local --index http://pypi-registry.supplementary-services.svc.cluster.local:#### ####

import emoji
import re
import string


@udf(returnType=ArrayType(StringType()))
def emojies(text):
    # return an empty list for null/empty text so the size() filter below drops it
    if not text:
        return []
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = emoji.demojize(text)
    text = re.findall(r'(:[^:]*:)', text)
    list_emoji = [emoji.emojize(x) for x in text]
    return list_emoji


emojies_in_posts = posts_df.select("id", 'text')\
    .withColumn("emojies", emojies(col("text")))\
    .where("size(emojies) > 0")\
    .select("id", F.explode("emojies").name("emoji"))\
    .groupBy("emoji").agg(F.count("id").name("count"), F.countDistinct("id").name("frequency"))\
    .withColumn("avg_cnt_per_post", col("count") / col("frequency"))\
    .withColumn("difference", col("count") - col("frequency"))\
    .limit(5)  # note: this limit truncates the aggregated frame before the rankings below
emojies_in_posts.show()
```

## Top 10 most popular emoticons

```
most_popular_emojies = emojies_in_posts.select("emoji", "count")\
    .orderBy(F.desc("count"))\
    .limit(5)
most_popular_emojies.show()
```

## Top 5 emoticons with the greatest difference between overall count and frequency

```
top_5_emojies = emojies_in_posts.select("emoji", "difference")\
    .orderBy(F.desc("difference"))\
    .limit(5)
top_5_emojies.show()
```

## Top 5 emoticons by average count per post

```
top_5 = emojies_in_posts.select("emoji", "avg_cnt_per_post")\
    .orderBy(F.desc("avg_cnt_per_post"))\
    .limit(5)
top_5.show()
```

## Probable “fans”

```
from pyspark.sql.window import Window

probable_fan = followers_posts_likes_df.select('ownerId', 'likerId', 'itemId')\
    .groupby('likerId', 'ownerId')\
    .agg(F.count('itemId').name('likes_num'))\
    .orderBy('likes_num')\
    .withColumn('number', F.row_number().over(Window.partitionBy('likerId').orderBy(F.desc('likes_num'))))

probable_fans = probable_fan.select('likerId', 'ownerId').where(probable_fan.number <= 10).orderBy('likerId', 'number')
probable_fans.show(10)
```

## Probable friends

```
probable_friends = probable_fans.alias("z1")\
    .join(probable_fans.alias("z2"), (col('z2.likerId') == col('z1.ownerId')), 'inner')\
    .select(col('z1.likerId'), col('z1.ownerId'), col('z2.ownerId').alias('ownerId2'))\
    .where((col('likerId') == col('ownerId2')) & (col('likerId') != col('ownerId')))\
    .select(col('likerId'), col('ownerId')).distinct().limit(10)
probable_friends.show()
```
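The HDFS paths and cluster settings above are tied to a specific environment. A minimal local sketch of the same select/alias/orderBy/limit pattern used by the queries above, on a toy DataFrame whose rows are invented purely for illustration:

```
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import col

# standalone local session, separate from the cluster session above
local_spark = SparkSession.builder.master("local[*]").appName("pattern_demo").getOrCreate()

toy_posts = local_spark.createDataFrame([
    Row(id=1, likes=Row(count=10)),
    Row(id=2, likes=Row(count=3)),
    Row(id=3, likes=Row(count=7)),
])

# same select / alias / orderBy / limit pattern as the notebook's queries
toy_posts.select('id', col('likes.count').alias('likes')) \
    .orderBy('likes', ascending=False) \
    .limit(2) \
    .show()
```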
kind: github_jupyter
quality_prob: 0.36727
learning_prob: 0.567757
# Plotting with matplotlib

This is a very quick introduction. It is provided here because plotting is fun, satisfying, and illuminating, and you probably need a break after the rather dry introduction to the numpy ndarray. Please refer to the matplotlib documentation (http://matplotlib.org/contents.html) for more information, and to the gallery (http://matplotlib.org/gallery.html) for more examples. For oceanographic examples, see the tutorials in http://currents.soest.hawaii.edu/ocn620/exercises.html.

First, we need to decide whether we want the plots to appear as static objects below the code blocks (`inline` option), or as fully interactive plots in independent windows. Typically the latter is best during development and exploration, and the former is used when the notebook has reached the stage of being a document. The `inline` option is required for making static renderings such as the static html page.

```
# %matplotlib inline
%matplotlib
```

Next, the standard imports:

```
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
```

## Minimal exploratory plotting

Suppose you have a time series of a variable, perhaps sea surface height, and you want to take a quick first look at it. For this example we will generate a fake data set so we don't have to bother downloading a real one.

```
# time in hours
t = np.arange(500)
h = 2.5 * np.sin(2 * np.pi * t / 12.42)
h += 1.5 * np.sin(2 * np.pi * t / 12.0)
h += 0.3 * np.random.randn(len(t))

plt.plot(t, h, color='black')
```

That was easy. If you are using the independent windows, now is a good time to experiment with the navigation toolbar. You might try modifying the code block to span a much greater length of time, then zoom in to a short interval, and pan to slide that interval along.

## A plot you might show to someone

Now let's make a plot that is intended for more than temporary exploration, so it will have a title, axis labels, and a legend. We will generate a fake data set with zonal and meridional velocity components.

```
def fake_tide(t, M2amp, M2phase, S2amp, S2phase, randamp):
    """
    Generate a minimally realistic-looking fake semidiurnal tide.

    t is time in hours
    phases are in radians
    """
    out = M2amp * np.sin(2 * np.pi * t / 12.42 - M2phase)
    out += S2amp * np.sin(2 * np.pi * t / 12.0 - S2phase)
    out += randamp * np.random.randn(len(t))
    return out

t = np.arange(500)
u = fake_tide(t, 2, 0, 1, 0, 0.2)
v = fake_tide(t, 1.2, np.pi / 2, 0.6, np.pi / 2, 0.2)

plt.plot(t, u, label='U', color='Red', alpha=0.5)
plt.plot(t, v, label='V', color='Blue', alpha=0.5)
plt.legend(loc='lower right', fancybox=True)
plt.xlabel('hours')
plt.ylabel('m s$^{-1}$')
plt.title('Demo with a fake semidiurnal tidal velocity record')
```

Now let's use two subplots with shared axes--that is, locked together--to look at two fake datasets. At the same time, we will switch to a more object-oriented style, which becomes increasingly desirable as plot complexity increases. In this style we make very little use of pyplot. Instead, we create plot objects and operate on them by calling their methods.

```
u1 = fake_tide(t, 2.2, 0.3, 1, .3, 0.4)
v1 = fake_tide(t, 1.1, 0.3 + np.pi / 2, 0.6, 0.3 + np.pi / 2, 0.4)

legendkw = dict(loc='lower right', fancybox=True, fontsize='small')
plotkwU = dict(label='U', color='Red', alpha=0.5)
plotkwV = dict(label='V', color='Black', alpha=.8)

fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, sharey=False)

ax0.plot(t, u, **plotkwU)
ax0.plot(t, v, **plotkwV)
ax0.legend(**legendkw)

ax1.plot(t, u1, **plotkwU)
ax1.plot(t, v1, **plotkwV)
ax1.legend(**legendkw)

# keep the y tick labels from getting too crowded
ax1.locator_params(axis='y', nbins=5)

ax1.set_xlabel('hours')
ax0.set_ylabel('m s$^{-1}$')
ax1.set_ylabel('m s$^{-1}$')
ax0.set_title('Location A')
ax1.set_title('Location B')
fig.suptitle('Demo with a fake semidiurnal tidal velocity record')
```

With the plot above in its own window, so that you have the navigation tools, try panning and zooming, or selecting a rectangle. You will see that the region selection is applied to both subplots, because their x-axes are shared.
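As noted in the introduction, the `inline` option is what you would use once the notebook has become a document. For a standalone static rendering of the last figure, a minimal sketch (the file name and DPI are arbitrary choices, not part of the original notebook):

```
# save the two-panel figure from the previous cell as a static image
fig.savefig('tidal_demo.png', dpi=150, bbox_inches='tight')
```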
kind: github_jupyter
quality_prob: 0.60871
learning_prob: 0.98882
https://nanonets.com/blog/human-pose-estimation-2d-guide/#deeppose

https://github.com/opencv/opencv/blob/master/samples/dnn/openpose.py

```
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt

net = cv.dnn.readNetFromTensorflow("graph_opt.pb")

inWidth = 368
inHeight = 368
thr = 0.2

BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
               "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
               "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
               "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }

POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
               ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
               ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
               ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
               ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]

img = cv.imread('image.jpg')
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
plt.imshow(img)

def pose_estimation(frame):
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]

    net.setInput(cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (127.5, 127.5, 127.5), swapRB=True, crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

    assert(len(BODY_PARTS) <= out.shape[1])

    points = []
    for i in range(len(BODY_PARTS)):
        # Slice heatmap of corresponding body's part.
        heatMap = out[0, i, :, :]

        # Originally, we try to find all the local maximums. To simplify a sample
        # we just find a global one. However only a single pose at the same time
        # could be detected this way.
        _, conf, _, point = cv.minMaxLoc(heatMap)
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]

        # Add a point if its confidence is higher than the threshold.
        points.append((int(x), int(y)) if conf > thr else None)

    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert(partFrom in BODY_PARTS)
        assert(partTo in BODY_PARTS)

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]

        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    return frame

estimated_img = pose_estimation(img)
plt.imshow(estimated_img)

cap = cv.VideoCapture(0)
cap.set(3, 800)
cap.set(4, 800)

if not cap.isOpened():
    cap = cv.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Can not Open Video")

while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break
    if cv.waitKey(10) & 0xFF == ord('q'):
        break

    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]

    net.setInput(cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (127.5, 127.5, 127.5), swapRB=True, crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

    assert(len(BODY_PARTS) <= out.shape[1])

    points = []
    for i in range(len(BODY_PARTS)):
        # Slice heatmap of corresponding body's part.
        heatMap = out[0, i, :, :]

        # Originally, we try to find all the local maximums. To simplify a sample
        # we just find a global one. However only a single pose at the same time
        # could be detected this way.
        _, conf, _, point = cv.minMaxLoc(heatMap)
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]

        # Add a point if its confidence is higher than the threshold.
        points.append((int(x), int(y)) if conf > thr else None)

    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert(partFrom in BODY_PARTS)
        assert(partTo in BODY_PARTS)

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]

        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    cv.imshow('OpenPose using OpenCV', frame)
```
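The webcam loop above repeats the body of `pose_estimation` verbatim. A minimal sketch of the same loop that simply reuses the function, assuming the definitions from the cell above are in scope:

```
cap = cv.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Can not Open Video")

# exit when 'q' is pressed
while cv.waitKey(10) & 0xFF != ord('q'):
    hasFrame, frame = cap.read()
    if not hasFrame:
        break
    # run the same single-person estimation on each captured frame
    cv.imshow('OpenPose using OpenCV', pose_estimation(frame))

cap.release()
cv.destroyAllWindows()
```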
kind: github_jupyter
quality_prob: 0.421433
learning_prob: 0.76973
```
import os
import re
import transformers
import pandas as pd
from collections import Counter
```

### Part 1: Preprocessing

Remove all special characters:

```
def basicPreprocess(text):
    processed_text = text.lower()
    processed_text = re.sub(r"[^a-zA-Z0-9]+", ' ', processed_text)
    return processed_text
```

This dataset is based on the [CORD Challenge](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge) on Kaggle. To see how I cleaned it up from source and converted it into a CSV for ease of use, please check out [my notebook](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/prep_pdf.ipynb). [Download this CSV](https://drive.google.com/file/d/1n6r40XFGlYF9phWP-Hx6Y4QiTqw_I7uS/view?usp=sharing) for yourself. It's approximately 4 GB.

```
complete_df = pd.read_csv("data/clean_df.csv")

data = complete_df.sample(frac = 1).sample(frac = 1)
data.dropna(inplace = True)
del complete_df

data = data["abstract"].apply(basicPreprocess).replace("\n", " ")

text = ''
for i in data.values:
    text += i
del data

counter = Counter(text.split())
del text
```

Remove words that are too frequent or infrequent:

```
vocab = []
for keys, values in counter.items():
    if values > 100 and values < 10000:
        vocab.append(keys)

len(vocab)
```

### Part 2: Load Pretrained model and expand

#### Load the awesome [Allen AI SciBERT Model](https://github.com/allenai/scibert), which is a BERT model trained on scientific text.

```
tokenizer = transformers.AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
model = transformers.AutoModelWithLMHead.from_pretrained('allenai/scibert_scivocab_uncased').to('cuda')

model.config
print(len(tokenizer))
```

Add new tokens to the existing tokenizer.

```
tokenizer.add_tokens(vocab)
print(len(tokenizer))
```

Now we need to resize the embedding layer to match the new dictionary size.

```
model.resize_token_embeddings(len(tokenizer))
model.config
del vocab
```

### Part 3: Fine Tune Model for Language Modeling

```
os.makedirs('models/COVID-scibert-latest', exist_ok = True)
tokenizer.save_pretrained('models/COVID-scibert-latest')

dataset = transformers.LineByLineTextDataset(
    tokenizer = tokenizer,
    file_path = "lm_data/train.txt",
    block_size = 16,
)

data_collator = transformers.DataCollatorForLanguageModeling(
    tokenizer = tokenizer, mlm = True, mlm_probability = 0.15
)

training_args = transformers.TrainingArguments(
    output_dir = "models/COVID-scibert-latest",
    overwrite_output_dir = True,
    num_train_epochs = 5,
    per_device_train_batch_size = 16,
    save_steps = 10_000,
    save_total_limit = 3,
)

trainer = transformers.Trainer(
    model = model,
    args = training_args,
    data_collator = data_collator,
    train_dataset = dataset,
    prediction_loss_only = True,
)

trainer.train()
trainer.save_model("models/COVID-scibert-latest")
```

### Part 4: Pipeline the Model for mask filling

```
model = transformers.AutoModelWithLMHead.from_pretrained('models/COVID-scibert-latest')
tokenizer = transformers.AutoTokenizer.from_pretrained("models/COVID-scibert-latest")

nlp_fill = transformers.pipeline('fill-mask', model = model, tokenizer = tokenizer)
nlp_fill('Coronavirus or COVID-19 can be prevented by a' + nlp_fill.tokenizer.mask_token)
```
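Part 3 reads `lm_data/train.txt`, but the notebook never shows that file being written. A minimal sketch of one way it could be produced from the cleaned abstracts, assuming `basicPreprocess` from Part 1 is in scope; the one-abstract-per-line layout is an assumption to match `LineByLineTextDataset`:

```
import os
import pandas as pd

# Re-derive the cleaned abstracts and write one per line for LineByLineTextDataset.
complete_df = pd.read_csv("data/clean_df.csv")
abstracts = complete_df["abstract"].dropna().apply(basicPreprocess)

os.makedirs("lm_data", exist_ok=True)
with open("lm_data/train.txt", "w") as f:
    for line in abstracts:
        f.write(line.strip() + "\n")
```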
kind: github_jupyter
quality_prob: 0.307254
learning_prob: 0.740127
QAkqyQNxPOUkO2JGJ9DqEyxxADk8gjfamzrSZfHqAyQxusJ86iEAKRxvq+wfbcGJK8M31mXAwV69N8WLEyHz+wNIS8A8Xu2OboJFKFsXFsgfFMYgAg5JfpaWtRNpavviTK2QPBFSQcQn9kLQh6m3rsRQIhPbZQXo49KRqly1naiBQuEzOPCFZe8I+Z1ckqOBvGXNnlb0wyA1IC4UR77T3TqKowNINr8IyF4g83V3bheR5V0g2hwce0FBaa0CCCNnz8S5tnQQnEEG4U2orrMsGV57bbciLKHbXO3w6Zzb0R+abfEAQQAmSeFWyjypthZocXH2+apFmSWuwcQnUfYXFjVIH4Emjwsvl1S8EAIeWuYC0uL6NsUF4AAIE4D8Qyb5bATxI1dZkXJEbaXrwbxQ9D5dAge2mRQYKhNidVCyMccAYhmDlDxmtB3bwNx/EFwIS8Lc2EJvh5+burCyi5JA8m5yg9cFxHAr4HgSQkDCBmmTflUQpiTm9ntHqZiPtUN2XSCBzsCEE2OU+8REuwboqcdGodCyBFW6rZcDOLVSmHeBeJnbHxrJQCRa2zvuxd6DICdsKibihPV2fpkhe67HoIvSBhA7NkvOp8NkgXQZDEEr7J6y/EdKsj7kW2ObQACwLfgIAi2p9C/HTXpxCmAiOAg2zzustLj+YwY8jY8bMcv5N0tAMgdSYg1xQYQv9EHgm+26Z2HG/lmDLe91x5k86FxAwjx6zarYSeE2R8ZgVgydaba8IbWwZ0QSvathpuN8S2hNlmXXpg4gAjzzIh5uin2PBmjbPNUAxGxKXAGIPJChdYhxjwDEWdrbM04BMT3hZnRxH5HADJ2GUBhiqoSxI2717FFAyL8qNtV4GhjlLHBxvBaED/kCECy5x9tKzaqgZAPQjcPTWCRTwgPovNdENw0iD4mkAbNyAIZj4KM+9VCrXYMIAeVARrbA+jbIDi3GaV/kFLcoXu+B1Kp3AkDSOAsG9BXhSkTIQFtWho0Y7ay7DaFx6MaRmSvtdawQK4OC/4KXhT1excWpYOMhSDjAZDxkJVp1PB8iQNI6CwHCpwLjV9V63WrWu+RPLDzIRxARs09THULDvH56YYso2QASEbgPNs8hj6PJW8bI+TtjnYFEGtnfoVt/b+NDNWqXpjrbL3MHgEApGfHASAr0sLuJ/hjCO7XjEydaAts1yortrdysU22bZQrIUwOc5UlAiCZxkURABLvPIVbgi5cWGfYit2qIPhl6PL4+BYgH2a1t7bdUOPhjgDEPxcgzrL56Cqh8ZJGZc6nRyxoe557c2M3hFznCEB88kjboqkB8UMQMn4A0eQYm3VXC5KPQJN9WgZ1Y0aYGyhRAMlbkQaSi2x83wri7GYsyT5q1xuax/ehGWc7AhBhnB0OIBHKQsjZEQu+Uj3fpwpQNoLkhojOCMkHEE3OsvFnDzRZ0TSQHbG7zAt0g+C1NsvOmQUi5NCIDL+Q4H+q1ovaBMktMS2QnAWHRwWQZFkgfh4ZMU+JyNs17Q4gfh6mLDhL7wipK95vsxVMT42Yl9gAMv7a9PD78YcgPqIZmTrOymgMxWGMR6EZva0kH54a5qonnhtz7loCED9nRLRKSmSeliYBQAIh/3foxh9DyCFxCCAg5FEg+Y7thu9AN091BCB5Zhp0OdWmqHaCbAclWdkI9kDcRyqX/KEWxuPQjMnOAMR0ByDEg22LqBok3wTJg6AXtwQgs10BiHXvEpuS2tkkNhUuOKF07OqGHRvJ0xwBiE+ODXNhCXld45oJHgQh7fUilRC8EpoxFlnFg0BFlgLI5BMiXIfJBxAr9rfbBu6PW7xvZm78Rm91X7cA8lSEi/JBq1Ejn4ks81jopb1VoH+V7RnDAWTcgoPCFLyQbwEAbklWDERGWiDvxSlvj0EL5Lc7gGSYB4L4OZvCnw/duKChVxbJndCLB8YNIE1cWLwFxP1jtnkRckiEC+uehvipkJeE1X8IWQYA2OEkBsKRFsi7CcxTjnsAEaXd1QUbayeEzML0S1vezRCfF9HB95e40OjlLAYSSAcZHIbMQs5s/LzkCBujqiDYEvgCs1ezY3ywFwZd0a1dACRjSneIhiC8cpUET4rDAkkGgMyyLYTqsEBiE8Vo9ofgp2zffwU+VUyaeBB9os2KrIHgxmMuKajZrlMHIW+MnpzBQ9Q5Da1ogcw5VNUeNVb9XrKq+Qy7ZAAIBe3gWA3Br4KitH7JNNJVgsru6EF07qM2BqE18HFyg+jBQWE7W6HqBuKRtwHT0tsdQKz3W2lzDd6u0ntD8vAoiA8La5jaIoDIZ8IbzjZTL+c3L7R5H+qg8Vr4VAINcbZtDVWDmulk3iKABM8MK38QfGPc83RCxDw57sZr5XvvtPnqH8PBf07DFF+swHvIz3hTRD+hNVEWacsAkmECfqM7RFjb860Q5tiw3Wt4mu/7CS24EfPaHkCsSbk5LPeauFzt1FsbQHIiihhXI7e0V4xFOiTiTIyn4C/qkxCAaEbo2W4NbxFi5Nuy2q6wffY5SKX4RrqK2gJArGu8FFGhO6/ZuUkGgOjBMyNSlG8FyQOb8MDH3WIDCKwUeiFfsT3/JujFFujH20W6xSwsaV9jzzuWt7YGkFBaLZmjbXptg7IaQhbnoihWQ3QACWVWWYkejVlY/mBe0+SQUIyLi2z33gNiA3ogXenbkbYMrWoQPwhdHuIiC8suu086nifHAKKbZ9leKFR8F4Relo6RTTEBA4vSoPPkCN9bJfw8Mg4AyYzh+jgTQu4Lq/z2lxwVITD3h/n1W6wxiInqbQcgFBzVpBLfJ7OxopmuwCIJAGJl0bxr22G/A8FN21trhekQckZYAaDgtY1B5AQsEM04s0khoR48wTaHFF6fI80YFshptmevAcmboQUtAAm1lEjKeSDmlIjn3QKfHIr8srQYG6dergFEyBNtrYNqIPhh6FHW1JjCbtD42jAA0YKDbNfpoTK26mznmFzZ4jufI+LMwpKHqSOtaxqK4XTpi4uvOdOREhaIJetbovSq2gw9yln1LabxmiKsgl7w4xhzRdO1ki0PVG6kxoP5dGOsje+DbMk5qvgveEFT/RPsBuKTmy0kFLKf6gpR3bAm9YgD6OKdJ0cAcvw5oQdZHZYZYOXGPwiS+RDyQpA8F8K8ACT90PgWJXiVtqKau6zskCnNAcg2EEuQHAqSZ4J4EEieBeKJEGEoWgtd3m/tJpQLPb8sDUJOjWgRsB5CzgTJC6DxaSA+HZpxtirtHwYhR0CTPmhmersAyOzQucvy/oieN7utc7WlH8QXgOS5ljtQXggKjlEV8LtdAYj1nneHVbsSPwwyh0PjUyB4APzydJAxHSR32b63E5o6DnTy5GgAshMkl0Pj86CrIXg4BBeq/Ppdtsp7K7B7zOWh5z4xwg35CXyBM5FpHAkhjwWZgyGM4dCMUlsRZDWIX4JfDsJF+WlJs0BmLwDGci+1i68Lb9chF0OYGSAepuZmKEheBDL9KmmkxrkFwr1V111bEoFxCTL5WJBxFMg8GYLPgxYYDwo77GoHdL4M49UudrgBaJwbEYzf
### ITS_live data on each glacier directory

This is an example notebook on how to extract velocity fields from the [ITS_live](https://its-live.jpl.nasa.gov/) Regional Glacier and Ice Sheet Surface Velocities Mosaic ([Gardner, A. et al 2019](http://its-live-data.jpl.nasa.gov.s3.amazonaws.com/documentation/ITS_LIVE-Regional-Glacier-and-Ice-Sheet-Surface-Velocities.pdf)) at 120 m resolution and reproject this data to the OGGM glacier grid.

For a detailed introduction on how to use the OGGM-shop and the glacier directories, see the following [link](https://github.com/OGGM/oggm-edu-notebooks/blob/master/oggm-tuto/oggm_shop.ipynb). In this notebook we will only focus on how OGGM processes the ITS_live data.
### Set-up

```
from oggm import cfg, utils
from oggm.shop import its_live, rgitopo
import xarray as xr
import salem
import numpy as np
import matplotlib.pyplot as plt

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

cfg.initialize(logging_level='WORKFLOW')
```

### Workflow

```
import os
from oggm import workflow, tasks

cfg.PATHS['working_dir'] = utils.gettempdir(dirname='its_live_example', reset=True)
```

Here is where your data will be stored.

```
cfg.PATHS['working_dir']
```

> If you need a different working directory, you can specify it in `cfg.PATHS['working_dir']`

Now let's select a glacier in Greenland and initialise the glacier directory with the following code:

```
# The RGI version to use
# V62 is an unofficial modification of V6 with only minor, backwards compatible modifications
prepro_rgi_version = 62

# Size of the map around the glacier.
prepro_border = 10

# Degree of processing level. This is OGGM specific and for the shop 1 is the one you want
from_prepro_level = 1

# URL of the preprocessed Gdirs
base_url = 'https://cluster.klima.uni-bremen.de/data/gdirs/dems_v1/default/'

gdirs = workflow.init_glacier_directories(['RGI60-05.00800'],
                                          from_prepro_level=from_prepro_level,
                                          prepro_base_url=base_url,
                                          prepro_rgi_version=prepro_rgi_version,
                                          prepro_border=prepro_border)

gdir = gdirs[0]

from oggm import graphics
graphics.plot_googlemap(gdir, figsize=(6, 6))

workflow.execute_entity_task(rgitopo.select_dem_from_dir, gdirs, keep_dem_folders=True);
workflow.execute_entity_task(tasks.glacier_masks, gdirs);
```

By applying the entity task [its_live.velocity_to_gdir()](https://github.com/OGGM/oggm/blob/master/oggm/shop/its_live.py#L185) the model downloads and reprojects the ITS_live files to a given glacier map.

The velocity components (**vx**, **vy**) are added to the `gridded_data` nc file stored on each glacier directory.

According to the [ITS_LIVE documentation](http://its-live-data.jpl.nasa.gov.s3.amazonaws.com/documentation/ITS_LIVE-Regional-Glacier-and-Ice-Sheet-Surface-Velocities.pdf) velocities are given in ground units (i.e. absolute velocities).

We then use bilinear interpolation to reproject the velocities to the local glacier map by re-projecting the vector distances. By specifying `add_error=True`, we also reproject and scale the error for each component (**evx**, **evy**).

```
workflow.execute_entity_task(its_live.velocity_to_gdir, gdirs);
```

Now we can read in all the gridded data that comes with OGGM, including the ITS_Live velocity components.

```
ds = xr.open_dataset(gdirs[0].get_filepath('gridded_data'))
ds

# get the ice velocity components, masked to the glacier
u = ds.obs_icevel_x.where(ds.glacier_mask == 1)
v = ds.obs_icevel_y.where(ds.glacier_mask == 1)
ws = (u**2 + v**2)**0.5
```

The `ds.glacier_mask == 1` command will remove the data outside of the glacier outline.

This is an example of how to visualise the data.
```
# get the axes ready
f, ax = plt.subplots(figsize=(6, 6))

# Quiver only every 7th grid point
us = u[4::7, 4::7]
vs = v[4::7, 4::7]

sm = ds.salem.get_map(countries=False)
sm.set_shapefile(gdir.read_shapefile('outlines'))
sm.set_data(ws)
sm.set_cmap('Blues')
sm.set_lonlat_contours(interval=1)
sm.plot(ax=ax)
sm.append_colorbar(ax=ax)

# transform their coordinates to the map reference system and plot the arrows
xx, yy = sm.grid.transform(us.x.values, us.y.values, crs=gdir.grid.proj)
xx, yy = np.meshgrid(xx, yy)
qu = ax.quiver(xx, yy, us.values, vs.values, units='width')
# ITS_LIVE velocities are in metres per year, so the reference arrow is labelled accordingly
qk = ax.quiverkey(qu, 0.7, 0.95, 50, '50 m yr$^{-1}$',
                  labelpos='W', coordinates='axes')
plt.tight_layout()
plt.show()
```
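As a small complement (not part of the original workflow), here is a minimal sketch of how one could summarise the masked velocity field numerically. It assumes the `ws` DataArray from the cell above and that the ITS_LIVE velocities are expressed in m yr$^{-1}$; the 100 m yr$^{-1}$ threshold is purely illustrative.

```
# Hypothetical follow-up: basic statistics of the glacier-masked ice speed.
# `ws` is NaN outside the glacier mask, and xarray reductions skip NaNs by default.
n_glacier = int(ws.notnull().sum())              # number of grid points on the glacier
mean_speed = float(ws.mean())                    # mean ice speed (m/yr)
max_speed = float(ws.max())                      # fastest grid point (m/yr)
frac_fast = float((ws > 100).sum()) / n_glacier  # fraction of glacier points faster than 100 m/yr

print(f"glacier grid points: {n_glacier}")
print(f"mean speed: {mean_speed:.1f} m/yr, max speed: {max_speed:.1f} m/yr")
print(f"fraction faster than 100 m/yr: {frac_fast:.1%}")
```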
# Calculate Z-Score Index (ZSI) with Python

This index is also as simple as RD; it is calculated by subtracting the long-term mean from an individual rainfall value and then dividing the difference by the standard deviation.

```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```

## Prepare data

```
data = pd.read_csv('data/prcphq.046037.month.txt', sep=r"\s+", skiprows=1,
                   usecols=[1, 2], parse_dates=True, index_col=0,
                   names=['Date', 'Rain'])
```

## Calculate six-monthly ZSI

Here we use all years as a reference period to calculate monthly long-term normals.

$$ZSI = \frac{p - p_m}{s}$$

where $p$ is the six-monthly rainfall total, $p_m$ its long-term mean and $s$ its standard deviation.

```
data['Rain_6'] = data['Rain'].rolling(6).sum()

df_6mon = data[['Rain_6']].dropna()
df_6mon['ZSI'] = np.nan

for imon in np.arange(1, 13):
    sinds = df_6mon.index.month == imon
    x = df_6mon[sinds]
    y = (x - x.mean())/x.std()
    df_6mon.loc[sinds, 'ZSI'] = y.values[:, 0]

data['ZSI'] = df_6mon['ZSI']
del df_6mon

data.head(7)
```

## Visualize

```
ax = data['ZSI'].plot(figsize=(15, 7), )
ax.axhline(1, linestyle='--', color='g')
ax.axhline(-1, linestyle='--', color='r')
ax.set_title('Six-Monthly Z-Score Index', fontsize=16)
ax.set_xlim(data.index.min(), data.index.max())
ax.set_ylim(-3, 3)
```

## Summary and discussion

The Z-Score does not require adjusting the data by fitting them to the Gamma or Pearson Type III distributions. Because of this, it is speculated that the Z-Score might not represent the shorter time scales (Edwards and Mckee, 1997). Because of its simple calculation and effectiveness, the Z-Score has been used in many drought studies (Akhtari et al., 2009; Komuscu, 1999; Morid et al., 2006; Patel et al., 2007; Tsakiris and Vangelis, 2004; Wu et al., 2001; Dogan et al., 2012). Various researchers have also claimed that it is as good as the SPI and can be calculated on multiple time steps. It can also accommodate missing values in the data series, like the CZI.

## References

Akhtari, R., Morid, S., Mahdian, M.H., Smakhtin, V., 2009. Assessment of areal interpolation methods for spatial analysis of SPI and EDI drought indices. Int. J. Climatol. 29, 135–145.

Dogan, S., Berktay, A., Singh, V.P., 2012. Comparison of multi-monthly rainfall-based drought severity indices, with application to semi-arid Konya closed basin, Turkey. J. Hydrol. 470–471, 255–268.

Edwards, D.C., Mckee, T.B., 1997. Characteristics of 20th century drought in the United States at multiple time scales. Atmos. Sci. Pap. 63, 1–30.

Komuscu, A.U., 1999. Using the SPI to analyze spatial and temporal patterns of drought in Turkey. Drought Network News (1994-2001). Paper 49. pp. 7–13.

Morid, S., Smakhtin, V., Moghaddasi, M., 2006. Comparison of seven meteorological indices for drought monitoring in Iran. Int. J. Climatol. 26, 971–985.

Patel, N.R., Chopra, P., Dadhwal, V.K., 2007. Analyzing spatial patterns of meteorological drought using standardized precipitation index. Meteorol. Appl. 14, 329–336.

Tsakiris, G., Vangelis, H., 2004. Towards a drought watch system based on spatial SPI. Water Resour. Manag. 18, 1–12.

Wu, H., Hayes, M.J., Weiss, A., Hu, Q.I., 2001. An evaluation of the standardized precipitation index, the China-Z index and the statistical Z-Score. Int. J. Climatol. 21, 745–758. http://dx.doi.org/10.1002/joc.658.
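As an aside (not in the original notebook), the per-month loop above can also be written without an explicit loop using `groupby`/`transform`. This is only a sketch assuming the same `data` frame and its `Rain_6` column; the `ZSI_alt` column name is illustrative.

```
# Hypothetical loop-free equivalent of the six-monthly ZSI computation above.
rain6 = data['Rain_6']
by_month = rain6.groupby(rain6.index.month)       # group the six-monthly totals by calendar month
data['ZSI_alt'] = (rain6 - by_month.transform('mean')) / by_month.transform('std')
```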
# First Order Ego Graph Analysis on Facebook User 1

In this notebook we analyze the first-order ego graph of a Facebook account with $\sim10^3$ friends.

## Modules

```
# Enable interactive numpy and matplotlib
%pylab inline

# Data Wrangling
import pandas as pd
import numpy as np

# Data Analysis
import powerlaw as pwl

# Data Visualization
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# Network Analysis
import networkx as nx
from networkx.algorithms import community
import networkx.algorithms.centrality as nc
import social_physics as soc

# Network Epidemiology
import EoN

# Data Visualization
from netwulf import visualize

# Other Utilities
import math
import sys, os, os.path
import itertools
from progressbar import ProgressBar, Bar, Percentage
from operator import itemgetter
from collections import Counter
from collections import defaultdict
import random

# Reload Custom Modules
from importlib import reload
soc = reload(soc)
```

## Graph Data Collection

Import the (undirected) graph.

```
# Import graphml file
G = nx.Graph(nx.read_graphml("/Users/pietromonticone/github/SocialPhysicsProject/Data/GraphML/Facebook1.graphml"))

# Rename the graph
G.name = "Facebook Friend EgoGraph"

# Show the basic attributes of the graph
print(nx.info(G))

# Relabel the nodes (from string IDs to integers)
G = nx.convert_node_labels_to_integers(G, first_label=0, ordering='default', label_attribute=None)
```

## Graph Visualization

```python
visualize(G)
```

![](./Images/Facebook/Facebook1.png)

## Graph Data Analysis

Let's visualize the normalized degree distribution.

### Degree Distribution

```
# Get degree distribution
undirected_degree_distribution, degree_mean, degree_variance = soc.get_degree_distribution(G, "degree")

# Set figure size
plt.figure(figsize=(9,6))

# Plot undirected degree distribution
soc.plot_degree_distribution(undirected_degree_distribution, title = "Degree Distribution", log = False, display_stats = True)

# Show mean and variance of the undirected degree distribution
print("Mean = ", degree_mean, "\nVar = ", degree_variance)
```

### Logarithmic Binning

The black line is the empirical linearly binned pdf.

```
# Set figure size
plt.figure(figsize=(7,4))

# Plot
pwl_distribution = soc.power_law_plot(graph = G, log = True, linear_binning = False, bins = 1000, draw = True)
```

The empirical PDF doesn't interpolate because it is obtained via linear binning, while the red data points represent the logarithmic binning.

### Linear Binning

```
# Set figure size
plt.figure(figsize=(7,4))

# Plot
pwl_distribution = soc.power_law_plot(graph = G, log = True, linear_binning = True, bins = 90, draw = True)
```

The figure above interpolates because it uses linear binning both for the scatter plot and for the pdf binning.

### Power Law Fitting

Here we estimate the extent to which the network follows a power law, and compare it with other common distributions.
#### Parameters Estimation

```
fit_function = pwl.Fit(list(undirected_degree_distribution.values()))

print("Exponent = ", fit_function.power_law.alpha)
print("Sigma (error associated to the exponent) = ", fit_function.power_law.sigma)
xmin = fit_function.power_law.xmin
print("x_min = ", xmin)
print("Kolmogorov-Smirnov distance = ", fit_function.power_law.D)
```

Because the fitted $x_{min} = 11$, let's fix it to a lower value prior to fitting.

```
fit_function_fix_xmin = pwl.Fit(list(undirected_degree_distribution.values()), xmin=5)

print("Exponent = ", fit_function_fix_xmin.power_law.alpha)
print("Sigma (error associated to the exponent) = ", fit_function_fix_xmin.power_law.sigma)
print("x_min = ", fit_function_fix_xmin.power_law.xmin)
print("Kolmogorov-Smirnov distance = ", fit_function_fix_xmin.power_law.D)
```

Now the error (sigma) is much lower than before, but the Kolmogorov-Smirnov distance is higher, as expected (because we fixed $x_{min}$ prior to fitting). Thus we confirmed that a power-law fit is rather good for this network. It is to be recalled, though, that power laws are usually able to explain most of the variance.<br>
Let us now compare the actual pdf with the fitted power law near the tail.<br>
<span style="color:blue">BLUE</span> : Fitted power law <br>
<span style="color:black">BLACK</span> : plotted pdf

```
# Set figure size
plt.figure(figsize=(7,4))

pwl_distribution = soc.power_law_plot(graph = G, log = True, linear_binning = False, bins = 90, draw = True, x_min = xmin)
fit_function.power_law.plot_pdf(color='b', linestyle='-', linewidth=1)

plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('$k$', fontsize=16)
plt.ylabel('$P(k)$', fontsize=16)
plt.show()
```

Also, let's plot the power law fitted with the $x_{min}$ fixed.

```
# Set figure size
plt.figure(figsize=(7,4))

#plt.plot(x,y,'ro')
pwl_distribution = soc.power_law_plot(graph = G, log = True, linear_binning = False, bins = 90, draw = True, x_min = xmin)
fit_function_fix_xmin.power_law.plot_pdf(color='b', linestyle='-', linewidth=1)

#fig.legend(fontsize=22)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('$k$', fontsize=16)
plt.ylabel('$P(k)$', fontsize=16)
plt.show()
```

Let us now compare the power law against other probability distributions. Remember that $R$ is the log-likelihood ratio between the two candidate distributions, which will be positive if the data are more likely under the first distribution, and negative if the data are more likely under the second distribution. The significance value for that direction is $p$ (the smaller the better).

```
R,p = fit_function.distribution_compare('power_law', 'exponential', normalized_ratio=True)
R,p

R,p = fit_function.distribution_compare('power_law', 'lognormal_positive', normalized_ratio=True)
R,p

R,p = fit_function.distribution_compare('power_law', 'truncated_power_law', normalized_ratio=True)
R,p

R,p = fit_function.distribution_compare('power_law', 'stretched_exponential', normalized_ratio=True)
R,p
```

Let us repeat the comparisons using the fit with fixed $x_{min}$:

```
R,p = fit_function_fix_xmin.distribution_compare('power_law', 'exponential', normalized_ratio=True)
R,p

R,p = fit_function_fix_xmin.distribution_compare('power_law', 'lognormal_positive', normalized_ratio=True)
R,p

R,p = fit_function_fix_xmin.distribution_compare('power_law', 'stretched_exponential', normalized_ratio=True)
R,p
```

## Centrality Metrics

Now we turn to plotting the centrality distributions.
#### Degree

```
# Get degree centrality
degree_centrality = soc.get_centrality(G, "degree")

# Set figure size
plt.figure(figsize=(7,4))

# Plot centrality distribution
soc.plot_centrality_distribution(G, degree_centrality, "Blue", 15)
```

#### Closeness

In connected graphs there is a natural distance metric between all pairs of nodes, defined by the length of their shortest paths. The *farness* of a node $x$ is defined as the sum of its distances from all other nodes, and its closeness was defined by Bavelas as the reciprocal of the farness, that is:

<center> $C(x)= \frac{1}{\sum_y d(y,x)}.$ </center>

Thus, the more central a node is, the lower its total distance from all other nodes. Note that taking distances *from* or *to* all other nodes is irrelevant in undirected graphs, whereas in directed graphs distances *to* a node are considered a more meaningful measure of centrality, as in general (e.g., on the web) a node has little control over its incoming links.

```
# Get centrality (computationally intensive!)
closeness_centrality = soc.get_centrality(G, "closeness")

# Set figure size
plt.figure(figsize=(7,4))

# Plot centrality distribution
soc.plot_centrality_distribution(G, closeness_centrality, "Blue", 30)
```

#### Betweenness

```
# Get centrality
betweenness_centrality = soc.get_centrality(G, "betweenness")

# Set figure size
plt.figure(figsize=(7,4))

# Plot centrality distribution
soc.plot_centrality_distribution(G, betweenness_centrality, "Blue", 30)
```

#### Katz

```
# Get centrality
katz_centrality = soc.get_centrality(G, "katz")

# Set figure size
plt.figure(figsize=(7,4))

# Plot centrality distribution
soc.plot_centrality_distribution(G, katz_centrality, "Blue", 30)
```

#### Eigenvector

```
# Get centrality
eigenvector_centrality = soc.get_centrality(G, "eigenvector")

# Set figure size
plt.figure(figsize=(7,4))

x_centrality=[]
y_centrality=[]
for i in eigenvector_centrality:
    x_centrality.append(i[0])
    y_centrality.append(i[1])

plt.scatter(x_centrality, y_centrality, color="Blue", marker="o", alpha=0.40)
#plt.yscale('log')
#plt.xscale('log')
plt.xlabel('$x$', fontsize = 15)
plt.ylabel('$P(x)$', fontsize = 15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.show()
```

#### PageRank

$$x_i=(1-\alpha) \sum_{j}A^{T}_{ij}\frac{x_j}{k^{out}_j}+\frac{\alpha}{N}$$

```
# Get centrality
pagerank_centrality = soc.get_centrality(G, "pagerank")

# Set figure size
plt.figure(figsize=(7,4))

# Plot centrality distribution
soc.plot_centrality_distribution(G, pagerank_centrality, "Blue", 30)
```

## Connectivity

```
# Show the connectivity of the analyzed graph
print("The graph has", G.number_of_nodes(), "nodes and", G.number_of_edges(), "edges.")
print("Is the graph connected?", nx.is_connected(G), ".")

G_cc = sorted(list(nx.connected_components(G)), key=len, reverse=True)
print("The graph has", len(G_cc), "connected components.")
print("The sizes of the connected components are", [len(c) for c in sorted(G_cc, key=len, reverse=True)], ". \nThus the GCC represents ", len(G_cc[0])/len(G), " of the nodal cardinality.")
```

## Clustering

Below, the evaluation of the *average clustering coefficient* and the *global clustering coefficient* may be found.
### Global clustering coefficient

The global clustering coefficient measures the number of triangles in the network and is defined as

$$ C_\Delta = \frac{3 \times \text{triangles}}{\text{triplets}} $$

In order to compare our graph with theoretical models (of the same size), it is thus sufficient to evaluate the number of triangles.

```
# Compute the global clustering coefficient of G (the fraction of all possible triangles in the network)
print("Global clustering coefficient = ", nx.transitivity(G))
```

### Average clustering coefficient

The overall level of clustering in a network is measured by Watts and Strogatz as the average of the local clustering coefficients of all the $n$ vertices:

$$\bar{C} = \frac{1}{n}\sum_{i=1}^{n} C_i.$$

It is worth noting that this metric places more weight on the low-degree nodes, while the transitivity ratio places more weight on the high-degree nodes. In fact, a weighted average where each local clustering score is weighted by $k_i(k_i-1)$ is identical to the global clustering coefficient. <br>
As per [this](https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.cluster.clustering.html) and [this](https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/cluster.html) resource, we notice that NetworkX's `average_clustering` function automatically takes care of the network being directed or not.

```
G_avg_cc = nx.average_clustering(G)
print("The average clustering coefficient is ", G_avg_cc)
```

## Path-ology

### Average shortest path length

```
GWCC = list(G_cc[0])

print("Since the graph is not connected, but one of its 3 connected components accounts for ", len(GWCC)/len(G), "of the node count, we approximate its average shortest path length with that of its biggest connected component, which is:", nx.average_shortest_path_length(G.subgraph(GWCC)), "\nLet's compare it with lnlnN = ", math.log(math.log(len(GWCC))), "(ultra small world)\nand with lnN/lnlnN = ", math.log(len(GWCC))/math.log(math.log(len(GWCC))), "(equivalent to a power law with exponent 3)\nand with lnN/ln(<k>) = ", math.log((len(GWCC)))/math.log(12.0171), "equivalent to a random network.")
```

## Comparisons

### G vs. ER

The most natural benchmark is an ER (random) network with the same number of nodes and links. In an ER network, $p_k$ is Poissonian (an exponential decay), so let's compare G with a random **Erdos-Renyi** graph with the same average connectivity and number of nodes.

```
nnodes = G.number_of_nodes()
nedges = G.number_of_edges()

#plink = 0.07803
plink = 2*nedges/(nnodes*(nnodes-1))  # 2* because it is undirected

ER = nx.fast_gnp_random_graph(nnodes, plink)
average_degree = sum(list(dict(ER.degree()).values()))/len(ER.degree())

# Connectivity
print("The ER graph has", len(ER), "nodes", "and", len(ER.edges()), "edges.\n The difference between its maximum and minimum degree is:", max(list(dict(ER.degree).values()))-min(list(dict(ER.degree).values())), ", while the same difference in our network is:", max(list(dict(G.degree).values()))-min(list(dict(G.degree).values())), "which is higher, confirming that real networks are not random.")

# Test connectedness
print("Is the ER graph simply connected ?", nx.is_connected(ER), ". In fact the average degree is:", average_degree, "and the natural log of the number of nodes is", math.log(nnodes), "which is smaller, so we are in the connected regime.")

# Average clustering coefficient
print("The average clustering coefficient of ER is", nx.average_clustering(ER), "which, if compared with <k>/N =", average_degree/(nnodes), "we can observe they are similar, as expected. But it is approximately one order of magnitude less than the ego network's one.")

# Total number of triangles
print("The transitivity of the network is", nx.transitivity(ER))

# Average shortest path
print("The ER graph is small world since the average shortest path is", nx.average_shortest_path_length(ER), "\nAnd the expected result is lnN/ln(<k>) = ", math.log(len(ER))/math.log(plink*(nnodes-1)))
```

### G vs. AB

Thinking about a broad (not exponentially decaying) distribution, more like a power law, we may think about an AB (Albert-Barabasi) network, so let's compare G with a random **Albert-Barabasi** graph with the same average connectivity and number of nodes.

```
n = G.number_of_nodes()
m = int(G.number_of_edges() / G.number_of_nodes())

AB = nx.barabasi_albert_graph(n, m)

# Test connectedness
print("Is the AB graph simply connected ?", nx.is_connected(AB))

# Connectivity
print("The AB graph has", len(AB), "nodes", "and", len(AB.edges()), "edges.\n The difference between its maximum and minimum degree is:", max(list(dict(AB.degree).values()))-min(list(dict(AB.degree).values())), ", while the same difference in our network is:", max(list(dict(G.degree).values()))-min(list(dict(G.degree).values())), "which is of a similar order of magnitude, confirming that Albert-Barabasi captures the fundamental mechanisms that underlie real network formation better than a random network would.")

# Average clustering coefficient
print("The average clustering coefficient of AB is", nx.average_clustering(AB), ". We may compare it with the predicted C_l = (m*ln(N)^2)/(4*N) = ", (m*(math.log(n))**2)/(4*n), "while the global clustering coefficient is: ", nx.transitivity(AB), ".")

# Average shortest path
print("The AB graph is small world since the average shortest path is", nx.average_shortest_path_length(AB), "and the expected result is lnN/lnlnN =", math.log(len(AB))/math.log(math.log(len(AB))))
```

Let's verify that an AB network follows a power-law distribution.

```
# Create the degree distribution
AB_degree = dict(AB.degree()).values()
AB_degree_distribution = Counter(AB_degree)

# Plot the degree frequency distribution &
# the probability density function
plt.figure(figsize=(10,7))

x=[]
y=[]
for i in sorted(AB_degree_distribution):
    x.append(i)
    y.append(float(AB_degree_distribution[i])/len(AB))

plt.plot(np.array(x), np.array(y))
pwl.plot_pdf(list(AB_degree))

plt.xlabel('$k$', fontsize=18)
plt.ylabel('$P(k)$', fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.yscale('log')
plt.xscale('log')
plt.axis([0,400,0.000,1])
plt.show()

# Fit the degree distribution with a power law
fit_function = pwl.Fit(list(AB_degree), xmin=11)

# Output parameters
print("alpha = ", fit_function.power_law.alpha)
print("sigma = ", fit_function.power_law.sigma)
print("x_min = ", fit_function.power_law.xmin)
print("Kolmogorov-Smirnov distance = ", fit_function.power_law.D)
```

### G vs. WS

A Watts-Strogatz network combines the small-world property (short average shortest path) with a high clustering coefficient. This model starts from a ring lattice where each node is connected to its $d$ nearest neighbors,
and then with probability $r$ each link is detached from one end and rewired to another random node. Let's compare G with a random **Watts-Strogatz** graph with the same average connectivity and number of nodes.<br>

```
# let's find the rewiring rate r that best approximates G, in terms of average clustering coefficient.
n = G.number_of_nodes()  # nodal cardinality
d = 2*int(G.number_of_edges() / G.number_of_nodes())

avg_clust_coeffs_ws = []
r_log_list = numpy.logspace(-5, 0, num=20, endpoint=True, base=10.0, dtype=None, axis=0)
#print(r_list)
runs = 50

for r in r_log_list:
    WS = nx.connected_watts_strogatz_graph(n, d, r, runs)
    #WS = nx.watts_strogatz_graph(n, d, r)
    avg_clust_coeffs_ws.append(nx.average_clustering(WS))

avg_clust_coeffs_ws_norm = [avg_clust_coeffs_ws[i]/avg_clust_coeffs_ws[0] for i in range(len(avg_clust_coeffs_ws))]

plt.scatter(r_log_list, avg_clust_coeffs_ws_norm, marker = "o")
plt.xscale("log")
plt.xlabel("$r$")
plt.axis([0.000005,1.32,-0.1,1.1])
plt.ylabel("$C(r)/C(0)$")
plt.title("")
plt.show()

# find the r value that best approximates G (in terms of clustering coefficient; we couldn't choose the best
# compromise between average clustering coefficient and average shortest distance because the latter would have
# taken too much time to evaluate for all r's)
best_avg_cc = avg_clust_coeffs_ws[np.argmin([abs(avg_clust_coeffs_ws[i]-G_avg_cc) for i in range(len(avg_clust_coeffs_ws))])]
best_r = r_log_list[np.argmin([abs(avg_clust_coeffs_ws[i]-G_avg_cc) for i in range(len(avg_clust_coeffs_ws))])]
print("best rewiring rate = ", best_r, "\nbest_avg_cc = ", best_avg_cc, "(", abs(best_avg_cc-G_avg_cc), "apart from G's one)")

# Input parameters
r = best_r
WS = nx.connected_watts_strogatz_graph(n, d, r, runs)

# Test connectedness
print("Is the WS graph simply connected ?", nx.is_connected(WS))

# Connectivity
print("The WS graph has", len(WS), "nodes", "and", len(WS.edges()), "edges.")

# Average clustering coefficient
print("The average clustering coefficient of WS is", nx.average_clustering(WS))

# Total number of triangles
print("The global clustering coefficient is", nx.transitivity(WS))

# Average shortest path
print("The WS graph has average shortest path = ", nx.average_shortest_path_length(WS), "\nAnd we compare it with lnN/ln(<k>) = ", math.log(len(WS))/math.log(n*d/(n)))

# Extract the degree distribution
ws_degrees = (dict(WS.degree()).values())

# Plot the degree frequency distribution
plt.figure(figsize=(10,7))
plt.hist(ws_degrees, bins=10)
plt.xlabel('$k$', fontsize=18)
plt.ylabel('$P(k)$', fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
```

# Degree assortativity of a network

A network is assortative with respect to one or more features if nodes with similar feature values are more often connected among themselves than with nodes having different feature values.<br>
The degree assortativity is assortativity with respect to degree: are nodes with similar degree more connected among themselves than with nodes of different degree?<br>
Degree assortativity can be measured in different ways. A simple approach is measuring the average nearest neighbor degree to assess the level of degree assortativity.

```
# degree assortativity can also be computed with nx's functions

# Compute the degree assortativity coefficient of G and ER
# This is the Pearson correlation coefficient of the red dots of the plot above. In fact, for the ER network
# it is close to zero, since in an ER network nodes are likely to connect regardless of their degree.
dac_G = nx.degree_assortativity_coefficient(G)
dac_ER = nx.degree_assortativity_coefficient(ER)
dac_AB = nx.degree_assortativity_coefficient(AB)
dac_WS = nx.degree_assortativity_coefficient(WS)

print("The degree assortativity coefficient of G is", dac_G, "\nwhile the degree assortativity coefficient of an ER graph is", dac_ER, "\nwhile the degree assortativity coefficient of an AB graph is", dac_AB, "\nwhile the degree assortativity coefficient of a WS graph is", dac_WS)

# Compute the Pearson / linear correlation coefficient with nx function
pcc_G = nx.degree_pearson_correlation_coefficient(G)
pcc_ER = nx.degree_pearson_correlation_coefficient(ER)
pcc_AB = nx.degree_pearson_correlation_coefficient(AB)
pcc_WS = nx.degree_pearson_correlation_coefficient(WS)

print("The Pearson correlation coefficient of G is", pcc_G, "\nwhile the Pearson correlation coefficient of an ER graph is", pcc_ER, "\nwhile the Pearson correlation coefficient of an AB graph is", pcc_AB, "\nwhile the Pearson correlation coefficient of a WS graph is", pcc_WS)
```

Anyway, this approach does not take into consideration possible nonlinear degree correlations. A less powerful but more general approach would be to measure the average nearest neighbor degree per degree class, in order to determine a possible trend and to compare it with the expected average nearest neighbor degree per degree class if the network is uncorrelated, in which case:

$$ k_{nn}^{unc}(k) = \frac{\langle k^2 \rangle}{\langle k \rangle}$$

```
# Compute the average nearest neighbour degree for all the nodes in G
x=[]
y=[]
avg_knn = defaultdict(list)
# For every node n, extract its degree k and append to avg_knn[k] the average degree of the neighbours of
# that node. So avg_knn becomes a dict of the type {k: [ak_1, ak_2, ...]} where ak_i is the average degree
# of the neighbours of the i-th node with degree k. Also save the k's in x and the average degrees of the
# neighbours in y.
for n in G.nodes():
    #k=soc.omit_by(dct = dict(G.degree()))
    k = G.degree(n)
    #nn=len(G.neighbors(n))
    total=0
    if k != 0:
        for j in G.neighbors(n):
            total += G.degree(j)
        avg_knn[k].append(float(total)/k)
        x.append(k)
        y.append(float(total)/k)
    else:
        avg_knn[k].append(0)
        x.append(k)
        y.append(0)

avg_knn_sort = {i:np.mean(avg_knn[i]) for i in sorted(avg_knn.keys())}

degree_distrib = {k:sum([1 if k == undirected_degree_distribution[i] else 0 for i in undirected_degree_distribution.keys()])/len(G) for k in np.unique(list(undirected_degree_distribution.values()))}
degrees = list(degree_distrib.keys())
probs = list(degree_distrib.values())

#k_unc = <k^2>/<k>
k_unc = sum([(degrees[k]**2)*probs[k] for k in range(len(degrees))])/sum([(degrees[k])*probs[k] for k in range(len(degrees))])
print("k_unc = ", k_unc)

# Plot the scatter of the average nearest neighbour degree per node, the average degree connectivity, and
# the expected uncorrelated average neighbour degree per degree class vs. the individual degree.
knn_avg4_items = nx.average_degree_connectivity(G).items()
knn_avg4 = sorted(knn_avg4_items)  # same as avg_knn_sort
#print(type(knn_avg4),knn_avg4[0])
z = [t[1] for t in knn_avg4]

plt.figure(figsize=(10,7))
plt.scatter(x, y, label='$k_{nn,i}$')
plt.hlines(k_unc, 0, 500, colors='green', linestyles='solid', label='$k^{unc}_{nn}$', data=None)
plt.plot(sorted(avg_knn.keys()), z, 'r-', label='$k_{nn}(k)$')
#plt.plot(sorted(avg_knn.keys()), z1,'g-')
plt.legend(loc = 'lower left', fontsize = 15)
plt.xlabel('$k_i$', fontsize=18)
plt.ylabel('$k_{nn}$', fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.yscale('log')
plt.xscale('log')
plt.title('Degree Assortativity Analysis', fontsize = 17)
# plt.axis([0.1,1000,1,1000])
plt.axis([0.1,800,1,500])
plt.show()

# ranked_nodes = list of nodes to remove (one by one, progressively)
# Returns a list of tuples (number of removed nodes, size of gcc), where gcc is the giant connected component
# at that moment. The decay of the size of the gcc is a measure of the robustness of the network, and/or of
# the efficiency of the attack. If important nodes are removed, the size of the gcc will decrease rapidly,
# while if less important nodes are removed, the gcc size will decrease with the number of (less important)
# nodes removed.
def net_attack(graph, ranked_nodes):
    fraction_removed=[]  # here we store the tuples: (% removed nodes, size of gcc)
    # make a copy of the graph to attack
    graph1=graph.copy()
    nnodes=len(ranked_nodes)
    n=0
    gcc=sorted(list(nx.connected_components(graph1)), key=len, reverse=True)[0]
    gcc_size=float(len(gcc))/nnodes
    print("gcc_size = ", gcc_size)
    fraction_removed.append( (float(n)/nnodes, gcc_size) )
    while gcc_size>0.01:
        # we start from the end of the list!
        graph1.remove_node(ranked_nodes.pop())
        gcc=sorted(list(nx.connected_components(graph1)), key=len, reverse=True)[0]
        gcc_size=float(len(gcc))/nnodes
        n+=1
        fraction_removed.append( (float(n)/nnodes, gcc_size) )
    return fraction_removed

# the attack sequence is the list of the nodes in no particular order
nodes=list(G.nodes())
resilience_random=net_attack(G, nodes)

nodes_betw=[]
betw=nx.betweenness_centrality(G)
for i in sorted(betw.items(), key=itemgetter(1)):
    nodes_betw.append(i[0])
resilience_betw=net_attack(G, nodes_betw)

nodes_degree=[]
deg=dict(G.degree())
for i in sorted(deg.items(), key=itemgetter(1)):
    nodes_degree.append(i[0])
resilience_deg=net_attack(G, list(nodes_degree))

x=[k[0] for k in resilience_random]
y=[k[1] for k in resilience_random]
x1=[k[0] for k in resilience_deg]
y1=[k[1] for k in resilience_deg]
x2=[k[0] for k in resilience_betw]
y2=[k[1] for k in resilience_betw]

plt.figure(figsize=(10,7))
plt.plot(x, y, label='random attack')
plt.plot(x1, y1, label='degree based')
plt.plot(x2, y2, label='betw based')
plt.xlabel('$f_{c}$', fontsize=22)
plt.ylabel('$LCC$', fontsize=22)
plt.xticks(fontsize=20)
plt.yticks(fontsize=22)
plt.axis([0,1,0,1])
plt.legend(loc='upper right', fontsize=20)

# y-axis is the size of the largest connected component, normalized with the initial gcc's size.
# x-axis is the fraction of nodes removed. Note that degree- or betweenness-based attacks are more effective
# than the random attack: thus a network with a broad p_k is very weak against degree/betweenness-based attacks.
```

## Stochastic SIR Epidemic on Static Network

```
# Model Parameters
mu = 0.2       # Recovery rate
lambd = 0.01   # Transmission rate per contact

# Simulation Parameters
nrun = 700     # Number of runs

# Multi-Run Simulation
runs = soc.network_SIR_multirun_simulation(G, nrun = nrun, lambd = lambd, mu = mu)

# Set figure size
plt.figure(figsize=(10,7))

# Plot the ensemble of trajectories
soc.plot_ensemble(runs)
```

### $\lambda$-Sensitivity of Final Epidemic Size

```
# Perform lambda-sensitivity analysis of final epidemic size (normalized attack rate)
data = soc.network_SIR_finalsize_lambda_sensitivity(G, mu = mu,
                                                    rho = 0.05,  # rho = initial fraction infected
                                                    lambda_min = 0.0001,
                                                    lambda_max = 1.0,
                                                    nruns = 20)

# Show sensitivity dataset
data

# Set figure size
plt.figure(figsize=(10,7))

# Display a boxplot with final epidemic size vs. transmission rate per edge/contact
soc.boxplot_finalsize_lambda_sensitivity(G, mu = mu, data = data,
                                         ymin = 0.045, ymax = 1.1,
                                         xlim = (0.00007, 1.5))
```
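The multirun simulation above relies on the custom `social_physics` helpers. As a cross-check (not part of the original analysis), here is a minimal sketch of a single stochastic SIR realization using the `EoN` package, which is already imported in the Modules section; the parameter values simply mirror `mu`, `lambd` and `rho` from the cells above.

```
# Hypothetical cross-check with EoN (Epidemics on Networks).
# tau = per-edge transmission rate, gamma = recovery rate, rho = initial infected fraction.
t, S, I, R = EoN.fast_SIR(G, tau=0.01, gamma=0.2, rho=0.05)

N = G.number_of_nodes()
plt.figure(figsize=(8, 5))
plt.plot(t, I / N, label='infected fraction')
plt.plot(t, R / N, label='recovered fraction')
plt.xlabel('$t$')
plt.ylabel('fraction of nodes')
plt.legend()
plt.show()
```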
```
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import numpy as np
from scipy.io import wavfile
from IPython.display import Audio  #allows for playing of audio
import librosa
import librosa.display  #allows for spectrographs and other audio manipulation
import pandas as pd
from pydub import AudioSegment  #allows for audio file slicing
import math
from scipy.fft import rfft, rfftfreq

aj2 = pd.read_csv(r".\Capstone Files\A. jubatus\AJ2.csv")
aj3 = pd.read_csv(r".\Capstone Files\A. jubatus\AJ3.csv")
aj5 = pd.read_csv(r".\Capstone Files\A. jubatus\AJ5.csv")
aj13 = pd.read_csv(r".\Capstone Files\A. jubatus\AJ13.csv")

aj2_sound, aj2_rate = librosa.load(r".\Capstone Files\A. jubatus\Acinonyx_jubatus_S0612_02.wav")
aj3_sound, aj3_rate = librosa.load(r".\Capstone Files\A. jubatus\Acinonyx_jubatus_S1121_03.wav")
aj5_sound, aj5_rate = librosa.load(r".\Capstone Files\A. jubatus\Acinonyx_jubatus_S1121_05.wav")
aj13_sound, aj13_rate = librosa.load(r".\Capstone Files\A. jubatus\Acinonyx_jubatus_S1366_13.wav")

#Acinonyx files
AJ2 = (aj2, aj2_sound)
AJ3 = (aj3, aj3_sound)
AJ5 = (aj5, aj5_sound)
AJ13 = (aj13, aj13_sound)

C1 = pd.read_csv(r".\Capstone Files\Caracal\C1.csv")
C1_sound , C1_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_1_Male_Growl+Hiss.wav")
C2 = pd.read_csv(r".\Capstone Files\Caracal\C2.csv")
C2_sound , C2_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_2_Male_Growl+Hiss.wav")
C3 = pd.read_csv(r".\Capstone Files\Caracal\C3.csv")
C3_sound , C3_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_3___Growl+Hiss.wav")
C4 = pd.read_csv(r".\Capstone Files\Caracal\C4.csv")
C4_sound , C4_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_4___Growl.wav")
C5 = pd.read_csv(r".\Capstone Files\Caracal\C5.csv")
C5_sound , C5_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_5___Hiss+Growl.wav")
C6 = pd.read_csv(r".\Capstone Files\Caracal\C6.csv")
C6_sound , C6_rate = librosa.load(r".\Capstone Files\Caracal\Caracal_6___Hiss+Growl.wav")

#Caracal files
C1T = (C1, C1_sound)
C2T = (C2, C2_sound)
C3T = (C3, C3_sound)
C4T = (C4, C4_sound)
C5T = (C5, C5_sound)
C6T = (C6, C6_sound)

D1 = pd.read_csv(r".\Capstone Files\Domestica\D1.csv")
D1_sound , D1_rate = librosa.load(r".\Capstone Files\Domestica\Felis_silvestris_f_domestica_S0050_01_pair_adult_hiss,growl.wav")
D2 = pd.read_csv(r".\Capstone Files\Domestica\D2.csv")
D2_sound , D2_rate = librosa.load(r".\Capstone Files\Domestica\Felis_silvestris_f_domestica_S0013_01.female_adult_hiss,call,growlwav.wav")
D3 = pd.read_csv(r".\Capstone Files\Domestica\D3.csv")
D3_sound , D3_rate = librosa.load(r".\Capstone Files\Domestica\Felis_silvestris_f_domestica_S0002_01_short_female_adult_growl, hiss.wav")
D4 = pd.read_csv(r".\Capstone Files\Domestica\D4.csv")
D4_sound, D4_rate = librosa.load(r".\Capstone Files\Domestica\Felis_silvestris_f_domestica_DIG0089_01_male_juvenile_call,purr.wav")
D5 = pd.read_csv(r".\Capstone Files\Domestica\D5.csv")
D5_sound, D5_rate = librosa.load(r".\Capstone Files\Domestica\Felis_silvestris_f_domestica_DIG0015_14_male_juvenile_purrpurr.wav")

#Domestica files
D1T = (D1, D1_sound)
D2T = (D2, D2_sound)
D3T = (D3, D3_sound)
D4T = (D4, D4_sound)
D5T = (D5, D5_sound)

L1 = pd.read_csv(r".\Capstone Files\L. Lynx\L1.csv")
L1_sound, L1_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_1.wav")
L2 = pd.read_csv(r".\Capstone Files\L. Lynx\L2.csv")
L2_sound, L2_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_2.wav")
L3 = pd.read_csv(r".\Capstone Files\L. Lynx\L3.csv")
L3_sound, L3_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_3.wav")
L4 = pd.read_csv(r".\Capstone Files\L. Lynx\L4.csv")
L4_sound, L4_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_4.wav")
L5 = pd.read_csv(r".\Capstone Files\L. Lynx\L5.csv")
L5_sound, L5_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_5.wav")
L6 = pd.read_csv(r".\Capstone Files\L. Lynx\L6.csv")
L6_sound, L6_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_6.wav")
L7 = pd.read_csv(r".\Capstone Files\L. Lynx\L7.csv")
L7_sound, L7_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_7.wav")
L8 = pd.read_csv(r".\Capstone Files\L. Lynx\L8.csv")
L8_sound, L8_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_8.wav")
L9 = pd.read_csv(r".\Capstone Files\L. Lynx\L9.csv")
L9_sound, L9_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_9.wav")
L10 = pd.read_csv(r".\Capstone Files\L. Lynx\L10.csv")
L10_sound, L10_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_10.wav")
L11 = pd.read_csv(r".\Capstone Files\L. Lynx\L11.csv")
L11_sound, L11_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_11.wav")
L12 = pd.read_csv(r".\Capstone Files\L. Lynx\L12.csv")
L12_sound, L12_rate = librosa.load(r".\Capstone Files\L. Lynx\Lynx_lynx_12.wav")

#L lynx audio files
L1T = (L1, L1_sound)
L2T = (L2, L2_sound)
L3T = (L3, L3_sound)
L4T = (L4, L4_sound)
L5T = (L5, L5_sound)
L6T = (L6, L6_sound)
L7T = (L7, L7_sound)
L8T = (L8, L8_sound)
L9T = (L9, L9_sound)
L10T = (L10, L10_sound)
L11T = (L11, L11_sound)
L12T = (L12, L12_sound)

LR1 = pd.read_csv(r".\Capstone Files\L. Rufus\LR1.csv")
LR1_sound, LR1_rate = librosa.load(r"C:\Users\exant\My Jupyter Files\Capstone\Capstone Files\L. Rufus\LR1.wav")
LR2 = pd.read_csv(r".\Capstone Files\L. Rufus\LR2.csv")
LR2_sound, LR2_rate = librosa.load(r".\Capstone Files\L. Rufus\LR2.wav")

#Lynx Rufus
LR1T = (LR1, LR1_sound)
LR2T = (LR2, LR2_sound)

LP1 = pd.read_csv(r".\Capstone Files\Leopardus\LP1.csv")
LP1_sound, LP1_rate = librosa.load(r".\Capstone Files\Leopardus\LP1.wav")
LP2 = pd.read_csv(r".\Capstone Files\Leopardus\LP2.csv")
LP2_sound, LP2_rate = librosa.load(r".\Capstone Files\Leopardus\LP2.wav")
LP3 = pd.read_csv(r".\Capstone Files\Leopardus\LP3.csv")
LP3_sound, LP3_rate = librosa.load(r".\Capstone Files\Leopardus\LP3.wav")
LP4 = pd.read_csv(r".\Capstone Files\Leopardus\LP4.csv")
LP4_sound, LP4_rate = librosa.load(r".\Capstone Files\Leopardus\LP4.wav")

#Leopardus Pardalis
LP1T = (LP1, LP1_sound)
LP2T = (LP2, LP2_sound)
LP3T = (LP3, LP3_sound)
LP4T = (LP4, LP4_sound)

audio_list = [AJ2, AJ3, AJ5, AJ13, D1T, D2T, D3T, D4T, D5T, C1T, C2T, C3T, C4T, C5T, C6T,
              L1T, L2T, L3T, L4T, L5T, L6T, L7T, L8T, L9T, L10T, L11T, L12T,
              LR1T, LR2T, LP1T, LP2T, LP3T, LP4T]

def frequency_range(audiofile):
    N = len(audiofile)
    y = np.abs(rfft(audiofile))
    x = rfftfreq(N, 1/ 22050)
    tup = (x, y)
    lst = []
    for x in range(len(tup[0])):
        if tup[1][x] > 2.5:
            lst.append(tup[0][x])
    freq_range = max(lst) - min(lst)
    return freq_range

def top_freq(file):
    f, t, mag = librosa.reassigned_spectrogram(file)
    mag_db = librosa.power_to_db(mag)
    freqs = [0]*len(f[0])
    for y in range(len(f[0])):
        for x in range(len(f)):
            if mag_db[x][y] > -10:
                freqs[y] = f[x][y]
    return freqs
```

def pulses1(file):
    count = 0
    frequencies = top_freq(file)
    for i in range(len(frequencies)):
        if i != 0 and i <= (len(frequencies)-2):
            if frequencies[i]>frequencies[i-1] and frequencies[i]>frequencies[i+1]:
                count += 1
    #pulse per second
    pulse = count/(len(file)/22050)
    return pulse

```
def pulses(file):
    y = file
    #Find max
    peaks = find_peaks(y, height = 0, prominence = 0.01)
    max_height = peaks[1]['peak_heights']  #array of the heights of peaks
    pulse = len(peaks[0])/(len(file)/22050)
    return pulse

def partials(file):
    count = 0
    mags, freqs, line = plt.magnitude_spectrum(file, 22050)
    for i in range(len(mags)):
        if i > 100 and i <= (len(mags)-102):
            previous = [mags[i-x] for x in range(1,100)]
            post = [mags[i+x] for x in range(1,100)]
            if mags[i] > max(previous) and mags[i] > max(post) and mags[i] > 0.0005:
                count += 1
    return count

def extract_mfcc(file):
    mfcc = np.mean(librosa.feature.mfcc(y=file, sr=22050, n_mfcc=13).T, axis=0)
    return mfcc

#Pass in a list of tuples, each tuple should contain two items, the first item will be the dataframe of the times
#the second item should be the audio file itself
def audio_feat_extractor(lst_tup):
    data = []
    for tup in lst_tup:
        csv = tup[0]
        audio = tup[1]
        for x, y in csv.iterrows():
            start = math.floor(y[3])
            end = math.ceil(y[4])
            temp = audio[start * 22050 : end * 22050]
            zeros = sum(librosa.zero_crossings(temp))
            duration = end - start
            positive = np.absolute(temp)
            amplitude_range = max(positive)-min(positive)
            average_amp = np.mean(positive)
            range_freq = frequency_range(temp)
            pulse = pulses(temp)
            partial = partials(temp)
            mfcc = extract_mfcc(temp)
            data.append([amplitude_range, average_amp, range_freq, pulse, partial, mfcc, duration, zeros, y[0], y[1], y[2], y[5]])
    output = pd.DataFrame(data, columns = ['Amp_range','Avg_amp', 'Freq_range','Pulses_per_Sec','Partials', 'MFCC', 'Duration','Zero_Crossings','Species', 'Sex', 'Age','Call'])
    return output

features_dataframe = audio_feat_extractor(audio_list)
features_dataframe.to_csv('features.csv', index=False)
```
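As a quick, hypothetical follow-up (not part of the original notebook), one might sanity-check the exported feature table; note that the `MFCC` column, being an array per row, will typically come back as a plain string when re-read with `read_csv`, so it is left out here.

```
# Hypothetical sanity check on the extracted features (column names from the cell above).
check = pd.read_csv('features.csv')

# Class balance across species and a quick look at a few numeric features.
print(check['Species'].value_counts())
print(check[['Duration', 'Pulses_per_Sec', 'Freq_range', 'Partials']].describe())
```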
# Univariate plotting with pandas

<table>
<tr>
<td><img src="https://i.imgur.com/skaZPhb.png" width="350px"/></td>
<td><img src="https://i.imgur.com/gaNttYd.png" width="350px"/></td>
<td><img src="https://i.imgur.com/pampioh.png"/></td>
<td><img src="https://i.imgur.com/OSbuszd.png"/></td>
<!--<td><img src="https://i.imgur.com/ydaMhT1.png" width="350px"/></td>
<td><img src="https://i.imgur.com/WLAqDSV.png" width="350px"/></td>
<td><img src="https://i.imgur.com/Tj2y9gH.png"/></td>
<td><img src="https://i.imgur.com/X0qXLCu.png"/></td>-->
</tr>
<tr>
<td style="font-weight:bold; font-size:16px;">Bar Chart</td>
<td style="font-weight:bold; font-size:16px;">Line Chart</td>
<td style="font-weight:bold; font-size:16px;">Area Chart</td>
<td style="font-weight:bold; font-size:16px;">Histogram</td>
</tr>
<tr>
<td>df.plot.bar()</td>
<td>df.plot.line()</td>
<td>df.plot.area()</td>
<td>df.plot.hist()</td>
</tr>
<tr>
<td>Good for nominal and small ordinal categorical data.</td>
<td>Good for ordinal categorical and interval data.</td>
<td>Good for ordinal categorical and interval data.</td>
<td>Good for interval data.</td>
</tr>
</table>

The `pandas` library is the core library for Python data analysis: the "killer feature" that makes the entire ecosystem stick together. However, it can do more than load and transform your data: it can visualize it too! Indeed, the easy-to-use and expressive plotting API is a big part of `pandas`' popularity.

In this section we will learn the basic `pandas` plotting facilities, starting with the simplest type of visualization: single-variable or "univariate" visualizations. This includes basic tools like bar plots and line charts. Through these we'll get an understanding of how the `pandas` plotting library is structured, and spend some time examining data types.

```
import pandas as pd
reviews = pd.read_csv("../input/wine-reviews/winemag-data_first150k.csv", index_col=0)
reviews.head(3)
```

## Bar charts and categorical data

Bar charts are arguably the simplest data visualization. They map categories to numbers: the amount of eggs consumed for breakfast (a category) to a number of breakfast-eating Americans, for example; or, in our case, wine-producing provinces of the world (category) to the number of labels of wines they produce (number):

```
reviews['province'].value_counts().head(10).plot.bar()
```

What does this plot tell us? It says California produces far more wine than any other province of the world! We might ask: what percent of the total is Californian vintage? This bar chart tells us absolute numbers, but it's more useful to know relative proportions. No problem:

```
(reviews['province'].value_counts().head(10) / len(reviews)).plot.bar()
```

California produces almost a third of the wines reviewed in Wine Magazine!

Bar charts are very flexible: the height can represent anything, as long as it is a number. And each bar can represent anything, as long as it is a category.

In this case the categories are **nominal categories**: "pure" categories that don't make a lot of sense to order. Nominal categorical variables include things like countries, ZIP codes, types of cheese, and lunar landers. The other kind are **ordinal categories**: things that do make sense to compare, like earthquake magnitudes, housing complexes with certain numbers of apartments, and the sizes of bags of chips at your local deli. Or, in our case, the number of reviews of a certain score allotted by Wine Magazine:

```
reviews['points'].value_counts().sort_index().plot.bar()
```

As you can see, every vintage is allotted an overall score between 80 and 100; and, if we are to believe that Wine Magazine is an arbiter of good taste, then a 92 is somehow meaningfully "better" than a 91.
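If you want `pandas` itself to remember that such an ordering exists, the ordered `Categorical` dtype can encode it explicitly. A minimal sketch (the `sizes` data below is made up purely for illustration):

```python
import pandas as pd

# An ordered Categorical records that "small" < "medium" < "large", so sorting
# and comparisons respect the intended order instead of alphabetical order.
sizes = pd.Series(["large", "small", "medium", "small"])
ordered_sizes = pd.Categorical(sizes, categories=["small", "medium", "large"], ordered=True)

print(pd.Series(ordered_sizes).sort_values())  # small, small, medium, large
print(ordered_sizes.max())                     # large
```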
## Line charts

The wine review scorecard has 20 different unique values to fill, for which our bar chart is just barely enough. What would we do if the magazine rated things 0-100? We'd have 100 different categories; simply too many to fit a bar in for each one!

In that case, instead of a bar chart, we could use a line chart:

```
reviews['points'].value_counts().sort_index().plot.line()
```

A line chart can pass over any number of individual values, making it the tool of first choice for distributions with many unique values or categories.

However, line charts have an important weakness: unlike bar charts, they're not appropriate for nominal categorical data. While bar charts distinguish between every "type" of point, line charts mush them together. In other words, a line chart asserts an order to the values on the horizontal axis, and that order won't make sense for some data. After all, a "descent" from California to Washington to Tuscany doesn't mean much!

Line charts also make it harder to distinguish between individual values.

In general, if your data can fit into a bar chart, just use a bar chart!

## Quick break: bar or line

Let's do a quick exercise. Suppose that we're interested in counting the following variables:

1. The number of tubs of ice cream purchased by flavor, given that there are 5 different flavors.
2. The average number of cars purchased from American car manufacturers in Michigan.
3. Test scores given to students by teachers at a college, on a 0-100 scale.
4. The number of restaurants located on the street by the name of the street in Lower Manhattan.

For which of these would a bar chart be better? Which ones would be better off with a line?

To see the answer, click the "Output" button on the code block below.

```
raw = """
<ol>
<li>This is a simple nominal categorical variable. Five bars will fit easily into a display, so a bar chart will do!</li>
<br/>
<li>This example is similar: nominal categorical variables. There are probably more than five American car manufacturers, so the chart will be a little more crowded, but a bar chart will still do it.</li>
<br/>
<li>This is an ordinal categorical variable. We have a lot of potential values between 0 and 100, so a bar chart won't have enough room. A line chart is better.</li>
<br/>
<li>
<p>Number 4 is a lot harder. City streets are obviously ordinary categorical variables, so we *ought* to use a bar chart; but there are a lot of streets out there! We couldn't possibly fit all of them into a display.</p>
<p>Sometimes, your data will have too many points to do something "neatly", and that's OK. If you organize the data by value count and plot a line chart over that, you'll learn valuable information about *percentiles*: that a street in the 90th percentile has 20 restaurants, for example, or one in the 50th just 6. This is basically a form of aggregation: we've turned streets into percentiles!</p>
<p>The lesson: your *interpretation* of the data is more important than the tool that you use.</p></li>
</ol>
"""

from IPython.display import HTML
HTML(raw)
```
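To make the percentile idea in answer 4 concrete, here is a small sketch; the street counts below are invented for illustration and are not part of the wine dataset:

```python
import numpy as np
import pandas as pd

# Invented data: number of restaurants on each of 200 hypothetical streets.
rng = np.random.RandomState(0)
restaurants_per_street = pd.Series(rng.poisson(lam=8, size=200))

# Sorting the counts and plotting them as a line is essentially a percentile view:
# a point's position along the x-axis (as a fraction of the total) is its percentile rank.
restaurants_per_street.sort_values().reset_index(drop=True).plot.line()

# The same information numerically: the 50th and 90th percentiles.
print(restaurants_per_street.quantile([0.5, 0.9]))
```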
## Area charts

Area charts are just line charts, but with the bottom shaded in. That's it!

```
reviews['points'].value_counts().sort_index().plot.area()
```

When plotting only one variable, the difference between an area chart and a line chart is mostly visual. In this context, they can be used interchangeably.

## Interval data

Let's move on by looking at yet another type of data, an **interval variable**.

Examples of interval variables are the wind speed in a hurricane, shear strength in concrete, and the temperature of the sun. An interval variable goes beyond an ordinal categorical variable: it has a *meaningful* order, in the sense that the difference between two entries is itself a measurable quantity. For example, if I say that this sample of water is -20 degrees Celsius, and this other sample is 120 degrees Celsius, then I can quantify the difference between them: 140 degrees "worth" of heat, or such-and-such many joules of energy.

The distinction can be a subtle one sometimes. At a minimum, being able to state something so clearly feels a lot more "measured" than, say, saying you'll buy this wine and not that one, because this one scored a 92 on some taste test and that one only got an 85. More definitively, any variable that has infinitely many possible values is definitely an interval variable (why not 120.1 degrees? 120.001? 120.0000000001? Etc).

Line charts work well for interval data. Bar charts don't: unless your ability to measure it is very limited, interval data will naturally vary by quite a lot.

Let's apply a new tool, the histogram, to an interval variable in our dataset, price (we'll cut price off at $200 a bottle; more on why shortly).

## Histograms

Here's a histogram:

```
reviews[reviews['price'] < 200]['price'].plot.hist()
```

A histogram looks, trivially, like a bar plot. And it basically is! In fact, a histogram is a special kind of bar plot that splits your data into even intervals and displays how many rows are in each interval with bars. The only analytical difference is that instead of each bar representing a single value, it represents a range of values.

However, histograms have one major shortcoming (the reason for our $200 caveat earlier). Because they break space up into even intervals, they don't deal very well with skewed data:

```
reviews['price'].plot.hist()
```

This is the real reason I excluded the >$200 bottles earlier; some of these vintages are really expensive! And the chart will "grow" to include them, to the detriment of the rest of the data being shown.

```
reviews[reviews['price'] > 1500]
```

There are many ways of dealing with the skewed data problem; those are outside the scope of this tutorial. The easiest is to just do what I did: cut things off at a sensible level. This phenomenon is known (statistically) as **skew**, and it's a fairly common occurrence among interval variables.
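Two other simple options are plotting on a logarithmic scale or clipping the extreme values; both keep every row while taming the long tail. A quick sketch using the `reviews` DataFrame loaded above (the bin count of 30 is an arbitrary choice; run the two cells separately, otherwise they draw on the same axes):

```python
import numpy as np

# Option 1: log-transform prices so the long right tail is compressed.
np.log1p(reviews['price'].dropna()).plot.hist(bins=30)
```

```python
# Option 2: clip extreme values instead of dropping them, so outliers pile up
# in the last bin rather than stretching the x-axis.
reviews['price'].clip(upper=200).plot.hist(bins=30)
```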
Histograms work best for interval variables without skew. They also work really well for ordinal categorical variables like `points`:

```
reviews['points'].plot.hist()
```

## Exercise: bar, line/area, or histogram?

Let's do another exercise. What would the best chart type be for:

1. The volume of apples picked at an orchard based on the type of apple (Granny Smith, Fuji, etcetera).
2. The number of points won in all basketball games in a season.
3. The count of apartment buildings in Chicago by the number of individual units.

To see the answer, click the "Output" button on the code block below.

```
raw = """
<ol>
<li>Example number 1 is a nominal categorical example, and hence, a pretty straightforward bar graph target.</li>
<br/>
<li>Example 2 is a large ordinal categorical variable. A basketball team can score between 50 and 150 points, too much for a bar chart; a line chart is a good way to go. A histogram could also work.</li>
<br/>
<li>Example 3 is an interval variable: a single building can have anywhere between 1 and 1000 or more apartment units. A line chart could work, but a histogram would probably work better! Note that this distribution is going to have a lot of skew (there is only a handful of very, very large apartment buildings).</li>
</ol>
"""

from IPython.display import HTML
HTML(raw)
```

## Conclusion and exercise

In this section of the tutorial we learned about the handful of different kinds of data, and looked at some of the built-in tools that `pandas` provides us for plotting them.

Now it's your turn!

For these exercises, we'll be working with the Pokemon dataset (because what goes together better than wine and Pokemon?).

```
import pandas as pd
pd.set_option('max_columns', None)
pokemon = pd.read_csv("../input/pokemon/pokemon.csv")
pokemon.head(3)
```

Try forking this kernel, and see if you can replicate the following plots. To see the answers, click the "Code" button to unhide the code.

The frequency of Pokemon by type:

```
pokemon['type1'].value_counts().plot.bar()
```

The frequency of Pokemon by HP stat total:

```
pokemon['hp'].value_counts().sort_index().plot.line()
```

The frequency of Pokemon by weight:

```
pokemon['weight_kg'].plot.hist(bins=20)
```

[Click here to move on to "Bivariate plotting with pandas"](https://www.kaggle.com/residentmario/bivariate-plotting-with-pandas/). You may also want to take a look at [the addendum on pie charts](https://www.kaggle.com/residentmario/data-visualization-addendum-pie-charts/).
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-03/lab.ipynb) # Lab 3 - Monte Carlo Tree Search In this exercise we will use the same game as in the previous exercise, namely, Connect 4. ([Connect 4](https://en.wikipedia.org/wiki/Connect_Four)). You should implement the MCTS algorithm to play the game. As before, the game is implemented below. It will play a game where both players take random (legal) actions. The MAX player is represented with a X and the MIN player with an O. The MAX player starts. Execute the code. ``` import random from copy import deepcopy from typing import Sequence NONE = '.' MAX = 'X' MIN = 'O' COLS = 7 ROWS = 6 N_WIN = 4 class ArrayState: def __init__(self, board, heights, n_moves): self.board = board self.heights = heights self.n_moves = n_moves @staticmethod def init(): board = [[NONE] * ROWS for _ in range(COLS)] return ArrayState(board, [0] * COLS, 0) def result(state: ArrayState, action: int) -> ArrayState: """Insert in the given column.""" assert 0 <= action < COLS, "action must be a column number" if state.heights[action] >= ROWS: raise Exception('Column is full') player = MAX if state.n_moves % 2 == 0 else MIN board = deepcopy(state.board) board[action][ROWS - state.heights[action] - 1] = player heights = deepcopy(state.heights) heights[action] += 1 return ArrayState(board, heights, state.n_moves + 1) def actions(state: ArrayState) -> Sequence[int]: return [i for i in range(COLS) if state.heights[i] < ROWS] def branch_states(state: ArrayState) -> Sequence[ArrayState]: """get all reachable states from the current state: useful for MCTS """ return [result(state, a) for a in actions(state)] def utility(state: ArrayState) -> float: """Get the winner on the current board.""" board = state.board def diagonalsPos(): """Get positive diagonals, going from bottom-left to top-right.""" for di in ([(j, i - j) for j in range(COLS)] for i in range(COLS + ROWS - 1)): yield [board[i][j] for i, j in di if i >= 0 and j >= 0 and i < COLS and j < ROWS] def diagonalsNeg(): """Get negative diagonals, going from top-left to bottom-right.""" for di in ([(j, i - COLS + j + 1) for j in range(COLS)] for i in range(COLS + ROWS - 1)): yield [board[i][j] for i, j in di if i >= 0 and j >= 0 and i < COLS and j < ROWS] lines = board + \ list(zip(*board)) + \ list(diagonalsNeg()) + \ list(diagonalsPos()) max_win = MAX * N_WIN min_win = MIN * N_WIN for line in lines: str_line = "".join(line) if max_win in str_line: return 1 elif min_win in str_line: return -1 return 0 def terminal_test(state: ArrayState) -> bool: return state.n_moves >= COLS * ROWS or utility(state) != 0 def printBoard(state: ArrayState): board = state.board """Print the board.""" print(' '.join(map(str, range(COLS)))) for y in range(ROWS): print(' '.join(str(board[x][y]) for x in range(COLS))) print() s = ArrayState.init() while not terminal_test(s): a = random.choice(actions(s)) s = result(s, a) printBoard(s) print(utility(s)) ``` The last number 0, -1 or 1 is the utility or score of the game. 0 means it was a draw, 1 means MAX player won and -1 means MIN player won. ### Exercise 1 (Transfer code from the previous exercise) Modify the code so that you can play manually as the MIN player against the random AI. ``` ### Code here! ``` ### Exercise 2 Implement the standard MCTS algorithm. 
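A common choice for the selection step (one standard option, not the only one) is the UCT score: from a node that has been visited $N$ times, pick the child $i$ that maximises

$$UCT_i = \frac{w_i}{n_i} + c\sqrt{\frac{\ln N}{n_i}}$$

where $w_i$ is the total reward accumulated through child $i$, $n_i$ is its visit count, and $c$ is the exploration constant (the `exploration_weight` argument in the skeleton below). The first term rewards exploitation of moves that have worked well so far; the second rewards exploration of rarely visited ones.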
```
from abc import ABC, abstractmethod
from collections import defaultdict
import math


class MCTS:
    "Monte Carlo tree searcher."

    def __init__(self, exploration_weight=1):
        pass

    def choose(self, state: ArrayState) -> ArrayState:
        "Choose a move in the game and execute it"
        pass

    def do_rollout(self, state: ArrayState):
        "Train for one iteration."
        pass

    def _select(self, state: ArrayState):
        "Find an unexplored descendent of the `state`"
        pass

    def _expand(self, state: ArrayState):
        "Expand the `state` with all states reachable from it"
        pass

    def _simulate(self, state: ArrayState):
        "Returns the reward for a random simulation (to completion) of the `state`"
        pass

    def _backpropagate(self, path, reward):
        "Send the reward back up to the ancestors of the leaf"
        pass

    def _uct_select(self, state: ArrayState):
        "Select a child of state, balancing exploration & exploitation"
        pass
```

### Exercise 3

Implement the loop where you play against your MCTS agent. Either train the agent at each step while you play against it, or pretrain it with more rollouts and play against it after training.

```
def train_model(state: ArrayState, num_rollouts: int):
    pass


## Play against the MCTS agent
pass
```

### Exercise 4

Add move ordering. The middle columns are often "better", since more winning positions contain them. Increase the probability of choosing middle columns when randomly executing rollouts: [3,2,4,1,5,0,6] (one way to bias the random choice is sketched after Exercise 5). See if your Connect 4 AI can beat you.

### Exercise 5 - Optional

Pit your MCTS agent against the one from the previous exercise.

* Which one wins more often?
* Which one takes more time to run per step once it is at a point that it can beat you?
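For Exercise 4, a minimal sketch of biasing rollout moves toward the centre, using the `actions` helper from the game code above; the exact weights are an arbitrary choice, not a tuned recommendation:

```python
import random

# Preference order from Exercise 4: centre columns first.
CENTRE_ORDER = [3, 2, 4, 1, 5, 0, 6]
# Map each column to a weight; a higher weight means the column is picked more often.
COLUMN_WEIGHTS = {col: len(CENTRE_ORDER) - rank for rank, col in enumerate(CENTRE_ORDER)}

def biased_rollout_action(state: ArrayState) -> int:
    """Pick a random legal column, but favour the middle of the board."""
    legal = actions(state)
    weights = [COLUMN_WEIGHTS[a] for a in legal]
    return random.choices(legal, weights=weights, k=1)[0]
```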
```
from abc import ABCMeta, abstractmethod, abstractproperty
import enum

import numpy as np
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)

import pandas

from matplotlib import pyplot as plt
%matplotlib inline
```

## Bernoulli Bandit

We are going to implement several exploration strategies for the simplest problem: the Bernoulli bandit.

The bandit has $K$ actions. Action $k$ produces a reward $r$ of 1.0 with probability $0 \le \theta_k \le 1$, which is unknown to the agent but fixed over time. The agent's objective is to minimize regret over a fixed number $T$ of action selections:

$$\rho = T\theta^* - \sum_{t=1}^T r_t$$

Where $\theta^* = \max_k\{\theta_k\}$

**Real-world analogy:** Clinical trials. We have $K$ pills and $T$ ill patients. After taking a pill, a patient is cured with probability $\theta_k$. The task is to find the most efficient pill.

A research paper on clinical trials: https://arxiv.org/pdf/1507.08025.pdf
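As a quick numerical illustration of the regret definition (the probabilities and rewards below are made up, not part of the assignment data):

```python
import numpy as np

# A made-up bandit with three arms; the best arm has theta* = 0.7.
theta = np.array([0.2, 0.5, 0.7])
T = 10

# Suppose the agent's 10 pulls produced these rewards (7 successes in total).
rewards = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])

regret = T * theta.max() - rewards.sum()
print(regret)  # 10 * 0.7 - 7 = 0.0 here; pulls spent on suboptimal arms push this number up
```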
```
class BernoulliBandit:
    def __init__(self, n_actions=5):
        self._probs = np.random.random(n_actions)

    @property
    def action_count(self):
        return len(self._probs)

    def pull(self, action):
        if np.random.random() > self._probs[action]:
            return 0.0
        return 1.0

    def optimal_reward(self):
        """ Used for regret calculation """
        return np.max(self._probs)

    def step(self):
        """ Used in nonstationary version """
        pass

    def reset(self):
        """ Used in nonstationary version """


class AbstractAgent(metaclass=ABCMeta):
    def init_actions(self, n_actions):
        self._successes = np.zeros(n_actions)
        self._failures = np.zeros(n_actions)
        self._total_pulls = 0

    @abstractmethod
    def get_action(self):
        """
        Get current best action
        :rtype: int
        """
        pass

    def update(self, action, reward):
        """
        Observe reward from action and update agent's internal parameters
        :type action: int
        :type reward: int
        """
        self._total_pulls += 1
        if reward == 1:
            self._successes[action] += 1
        else:
            self._failures[action] += 1

    @property
    def name(self):
        return self.__class__.__name__


class RandomAgent(AbstractAgent):
    def get_action(self):
        return np.random.randint(0, len(self._successes))
```

### Epsilon-greedy agent

For the above agent class, $\alpha_k$ and $\beta_k$ correspond to $successes$ and $failures$ respectively.

> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> $\hat\theta_k \leftarrow \alpha_k / (\alpha_k + \beta_k)$
>> **end for**
>> $x_t \leftarrow argmax_{k}\hat\theta$ with probability $1 - \epsilon$ or random action with probability $\epsilon$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**

Implement the algorithm above in the cell below:

```
class EpsilonGreedyAgent(AbstractAgent):
    def __init__(self, epsilon=0.01):
        self._epsilon = epsilon

    def get_action(self):
        n_pulls = self._successes + self._failures + 1e-8
        p = self._successes / n_pulls  # alpha / (alpha + beta)

        # explore with probability epsilon...
        if np.random.random() < self._epsilon:
            return np.random.randint(0, len(self._successes))
        # ...otherwise exploit the current best estimate
        else:
            return np.argmax(p)

    @property
    def name(self):
        return self.__class__.__name__ + "(epsilon={})".format(self._epsilon)
```

### UCB Agent

The epsilon-greedy strategy has no preference among actions. It would be better to select among actions that are uncertain or have the potential to be optimal. One can come up with the idea of an index for each action that represents optimality and uncertainty at the same time. One efficient way to do it is the UCB1 algorithm:

> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> $w_k \leftarrow \alpha_k / (\alpha_k + \beta_k) + \sqrt{2\log t / (\alpha_k + \beta_k)}$
>> **end for**
>> $x_t \leftarrow argmax_{k}w$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**

__Note:__ in practice, one can multiply $\sqrt{2\log t / (\alpha_k + \beta_k)}$ by some tunable parameter to regulate the agent's optimism and willingness to abandon non-promising actions.

More versions and optimality analysis: https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf

```
class UCBAgent(AbstractAgent):
    def get_action(self):
        n_pulls = self._successes + self._failures + 1e-8
        # empirical mean plus exploration bonus; the natural log and the +1
        # keep the argument of sqrt non-negative on the very first pull
        p = self._successes / n_pulls + \
            np.sqrt(2 * np.log(self._total_pulls + 1) / n_pulls)
        return np.argmax(p)

    @property
    def name(self):
        return self.__class__.__name__
```

### Thompson sampling

The UCB1 algorithm does not take into account the actual distribution of rewards. If we know the distribution, we can do much better by using Thompson sampling:

> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> Sample $\hat\theta_k \sim beta(\alpha_k, \beta_k)$
>> **end for**
>> $x_t \leftarrow argmax_{k}\hat\theta$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**

More on Thompson sampling: https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf

```
class ThompsonSamplingAgent(AbstractAgent):
    def get_action(self):
        # sample one plausible success probability per action and act greedily on the sample
        p = np.random.beta(self._successes + 1, self._failures + 1)
        return np.argmax(p)

    @property
    def name(self):
        return self.__class__.__name__


from collections import OrderedDict

def get_regret(env, agents, n_steps=5000, n_trials=50):
    scores = OrderedDict({
        agent.name: [0.0 for step in range(n_steps)] for agent in agents
    })

    for trial in range(n_trials):
        env.reset()

        for a in agents:
            a.init_actions(env.action_count)

        for i in range(n_steps):
            optimal_reward = env.optimal_reward()

            for agent in agents:
                action = agent.get_action()
                reward = env.pull(action)
                agent.update(action, reward)
                scores[agent.name][i] += optimal_reward - reward

            env.step()  # change bandit's state if it is unstationary

    for agent in agents:
        scores[agent.name] = np.cumsum(scores[agent.name]) / n_trials

    return scores

def plot_regret(scores):
    for agent in agents:
        plt.plot(scores[agent.name])

    plt.legend([agent for agent in scores])

    plt.ylabel("regret")
    plt.xlabel("steps")

    plt.show()

# Uncomment agents
agents = [
    EpsilonGreedyAgent(),
    UCBAgent(),
    ThompsonSamplingAgent()
]

regret = get_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)
plot_regret(regret)
```

### Submit to coursera

```
from submit import submit_bandits

submit_bandits(regret, '[email protected]', 'O5tkre9TrFn2XO6t')
```
Feature types:

- Quantitative
- Categorical (a limited number of values, more than two)
- Binary

Plots for exploration:

- One feature at a time: quantitative, categorical
- Relationships between features: everything else

t-SNE: a projection of a multidimensional space into 2d/3d

```
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline

df = pd.read_csv('../../data/telecom_churn.csv')
df.head()
```

# 1. Features one at a time

## 1.1 Quantitative features

```
df['Total day minutes'].hist();

sns.boxplot(df['Total day minutes']);
```

## 1.2 Categorical features

```
df['State'].value_counts().head()

df['Churn'].value_counts()

sns.countplot(df['Churn']);

sns.countplot(df['State']);

sns.countplot(df[df['State'].\
              isin(df['State'].value_counts().head().index)]['State']);
```

# 2. Interactions

## 2.1 Quantitative vs. quantitative

```
feat = [f for f in df.columns if 'charge' in f]

df[feat].hist();

sns.pairplot(df[feat]);

df['Churn'].map({False: 'blue', True: 'orange'})

plt.scatter(df['Total eve charge'], df['Total intl charge'],
            color=df['Churn'].map({False: 'blue', True: 'orange'}));
plt.xlabel('Evening charges (Latex: $a^2 + b^2$)');
plt.ylabel('International charges');

plt.scatter(df[df['Churn']]['Total eve charge'],
            df[df['Churn']]['Total intl charge'],
            color='orange', label='churn');
plt.scatter(df[~df['Churn']]['Total eve charge'],
            df[~df['Churn']]['Total intl charge'],
            color='blue', label='loyal');
plt.xlabel('Evening charges');
plt.ylabel('International charges');
plt.title('Distribution of charges');
plt.legend();

sns.heatmap(df.corr());

df.drop(feat, axis=1, inplace=True)

df.columns
```

## 2.2 Quantitative vs. categorical and quantitative vs. binary

```
sns.boxplot(x='Churn', y='Total day minutes', data=df);

sns.violinplot(x='Churn', y='Total day minutes', data=df);

df.groupby('International plan')['Total day minutes'].mean()

sns.boxplot(x='International plan', y='Total day minutes', data=df);
```

## 2.3 Categorical vs. categorical

```
pd.crosstab(df['Churn'], df['International plan'])

sns.countplot(x='International plan', hue='Churn', data=df);

sns.countplot(x='Customer service calls', hue='Churn', data=df);
```

## t-SNE (manifold learning)

```
from sklearn.manifold import TSNE

TSNE?

tsne = TSNE(random_state=0)

df2 = df.drop('State', axis=1)
df2['International plan'] = df2['International plan'].map({'Yes': 1, 'No': 0})
df2['Voice mail plan'] = df2['Voice mail plan'].map({'Yes': 1, 'No': 0})

df2.info()

%%time
tsne.fit(df2)

dir(tsne)

tsne.embedding_.shape

plt.scatter(tsne.embedding_[df2['Churn'].values, 0],
            tsne.embedding_[df2['Churn'].values, 1], color='orange');
plt.scatter(tsne.embedding_[~df2['Churn'].values, 0],
            tsne.embedding_[~df2['Churn'].values, 1], color='blue');

tsne.embedding_[df2['Churn'].values, 0].shape
```
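The same picture can be drawn in one call by reusing the colour-mapping trick from section 2.1; a sketch, assuming `tsne` has been fitted on `df2` as above:

```python
# Reuse the Churn -> colour mapping from section 2.1 to colour the whole
# embedding in a single scatter call instead of two.
plt.scatter(tsne.embedding_[:, 0],
            tsne.embedding_[:, 1],
            color=df2['Churn'].map({False: 'blue', True: 'orange'}));
```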
# 100 pandas puzzles

Inspired by [100 Numpy exercises](https://github.com/rougier/numpy-100), here are 100* short puzzles for testing your knowledge of [pandas'](http://pandas.pydata.org/) power.

Since pandas is a large library with many different specialist features and functions, these exercises focus mainly on the fundamentals of manipulating data (indexing, grouping, aggregating, cleaning), making use of the core DataFrame and Series objects. Many of the exercises here are straightforward in that the solutions require no more than a few lines of code (in pandas or NumPy... don't go using pure Python or Cython!). Choosing the right methods and following best practices is the underlying goal.

The exercises are loosely divided into sections. Each section has a difficulty rating; these ratings are subjective, of course, but should be seen as a rough guide as to how inventive the required solution is.

If you're just starting out with pandas and you are looking for some other resources, the official documentation is very extensive. In particular, some good places to get a broader overview of pandas are...

- [10 minutes to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)
- [pandas basics](http://pandas.pydata.org/pandas-docs/stable/basics.html)
- [tutorials](http://pandas.pydata.org/pandas-docs/stable/tutorials.html)
- [cookbook and idioms](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#cookbook)

Enjoy the puzzles!

\* *the list of exercises is not yet complete! Pull requests or suggestions for additional exercises, corrections and improvements are welcomed.*

## Importing pandas

### Getting started and checking your pandas setup

Difficulty: *easy*

**1.** Import pandas under the alias `pd`.

```
import pandas as pd
```

**2.** Print the version of pandas that has been imported.

```
pd.__version__
```

**3.** Print out all the version information of the libraries that are required by the pandas library.

```
pd.show_versions()
```

## DataFrame basics

### A few of the fundamental routines for selecting, sorting, adding and aggregating data in DataFrames

Difficulty: *easy*

Note: remember to import numpy using:

```python
import numpy as np
```

Consider the following Python dictionary `data` and Python list `labels`:

```python
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
        'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
        'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
        'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}

labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
```
(This is just some meaningless data I made up with the theme of animals and trips to a vet.)

**4.** Create a DataFrame `df` from this dictionary `data` which has the index `labels`.

```
import numpy as np

data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
        'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
        'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
        'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}

labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']

df = pd.DataFrame(data, index=labels)
```

**5.** Display a summary of the basic information about this DataFrame and its data (*hint: there is a single method that can be called on the DataFrame*).

```
df.info()

# ...or...

df.describe()
```

**6.** Return the first 3 rows of the DataFrame `df`.
``` df.iloc[:3] # or equivalently df.head(3) ``` **7.** Select just the 'animal' and 'age' columns from the DataFrame `df`. ``` df.loc[:, ['animal', 'age']] # or df[['animal', 'age']] ``` **8.** Select the data in rows `[3, 4, 8]` *and* in columns `['animal', 'age']`. ``` df.loc[df.index[[3, 4, 8]], ['animal', 'age']] ``` **9.** Select only the rows where the number of visits is greater than 3. ``` df[df['visits'] > 3] ``` **10.** Select the rows where the age is missing, i.e. it is `NaN`. ``` df[df['age'].isnull()] ``` **11.** Select the rows where the animal is a cat *and* the age is less than 3. ``` df[(df['animal'] == 'cat') & (df['age'] < 3)] ``` **12.** Select the rows the age is between 2 and 4 (inclusive). ``` df[df['age'].between(2, 4)] ``` **13.** Change the age in row 'f' to 1.5. ``` df.loc['f', 'age'] = 1.5 ``` **14.** Calculate the sum of all visits in `df` (i.e. the total number of visits). ``` df['visits'].sum() ``` **15.** Calculate the mean age for each different animal in `df`. ``` df.groupby('animal')['age'].mean() ``` **16.** Append a new row 'k' to `df` with your choice of values for each column. Then delete that row to return the original DataFrame. ``` df.loc['k'] = [5.5, 'dog', 'no', 2] # and then deleting the new row... df = df.drop('k') ``` **17.** Count the number of each type of animal in `df`. ``` df['animal'].value_counts() ``` **18.** Sort `df` first by the values in the 'age' in *decending* order, then by the value in the 'visit' column in *ascending* order (so row `i` should be first, and row `d` should be last). ``` df.sort_values(by=['age', 'visits'], ascending=[False, True]) ``` **19.** The 'priority' column contains the values 'yes' and 'no'. Replace this column with a column of boolean values: 'yes' should be `True` and 'no' should be `False`. ``` df['priority'] = df['priority'].map({'yes': True, 'no': False}) ``` **20.** In the 'animal' column, change the 'snake' entries to 'python'. ``` df['animal'] = df['animal'].replace('snake', 'python') ``` **21.** For each animal type and each number of visits, find the mean age. In other words, each row is an animal, each column is a number of visits and the values are the mean ages (*hint: use a pivot table*). ``` df.pivot_table(index='animal', columns='visits', values='age', aggfunc='mean') ``` ## DataFrames: beyond the basics ### Slightly trickier: you may need to combine two or more methods to get the right answer Difficulty: *medium* The previous section was tour through some basic but essential DataFrame operations. Below are some ways that you might need to cut your data, but for which there is no single "out of the box" method. **22.** You have a DataFrame `df` with a column 'A' of integers. For example: ```python df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]}) ``` How do you filter out rows which contain the same integer as the row immediately above? You should be left with a column containing the following values: ```python 1, 2, 3, 4, 5, 6, 7 ``` ``` df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]}) df.loc[df['A'].shift() != df['A']] # Alternatively, we could use drop_duplicates() here. Note # that this removes *all* duplicates though, so it won't # work as desired if A is [1, 1, 2, 2, 1, 1] for example. df.drop_duplicates(subset='A') ``` **23.** Given a DataFrame of random numeric values: ```python df = pd.DataFrame(np.random.random(size=(5, 3))) # this is a 5x3 DataFrame of float values ``` how do you subtract the row mean from each element in the row? 
``` df = pd.DataFrame(np.random.random(size=(5, 3))) df.sub(df.mean(axis=1), axis=0) ``` **24.** Suppose you have DataFrame with 10 columns of real numbers, for example: ```python df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij')) ``` Which column of numbers has the smallest sum? Return that column's label. ``` df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij')) df.sum().idxmin() ``` **25.** How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)? ``` df = pd.DataFrame(np.random.randint(0, 2, size=(10, 3))) len(df) - df.duplicated(keep=False).sum() # or perhaps more simply... len(df.drop_duplicates(keep=False)) ``` The next three puzzles are slightly harder. **26.** In the cell below, you have a DataFrame `df` that consists of 10 columns of floating-point numbers. Exactly 5 entries in each row are NaN values. For each row of the DataFrame, find the *column* which contains the *third* NaN value. You should return a Series of column labels: `e, c, d, h, d` ``` nan = np.nan data = [[0.04, nan, nan, 0.25, nan, 0.43, 0.71, 0.51, nan, nan], [ nan, nan, nan, 0.04, 0.76, nan, nan, 0.67, 0.76, 0.16], [ nan, nan, 0.5 , nan, 0.31, 0.4 , nan, nan, 0.24, 0.01], [0.49, nan, nan, 0.62, 0.73, 0.26, 0.85, nan, nan, nan], [ nan, nan, 0.41, nan, 0.05, nan, 0.61, nan, 0.48, 0.68]] columns = list('abcdefghij') df = pd.DataFrame(data, columns=columns) (df.isnull().cumsum(axis=1) == 3).idxmax(axis=1) ``` **27.** A DataFrame has a column of groups 'grps' and and column of integer values 'vals': ```python df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), 'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]}) ``` For each *group*, find the sum of the three greatest values. You should end up with the answer as follows: ``` grps a 409 b 156 c 345 ``` ``` df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), 'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]}) df.groupby('grps')['vals'].nlargest(3).sum(level=0) ``` **28.** The DataFrame `df` constructed below has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive). For each group of 10 consecutive integers in 'A' (i.e. `(0, 10]`, `(10, 20]`, ...), calculate the sum of the corresponding values in column 'B'. The answer should be a Series as follows: ``` A (0, 10] 635 (10, 20] 360 (20, 30] 315 (30, 40] 306 (40, 50] 750 (50, 60] 284 (60, 70] 424 (70, 80] 526 (80, 90] 835 (90, 100] 852 ``` ``` df = pd.DataFrame(np.random.RandomState(8765).randint(1, 101, size=(100, 2)), columns = ["A", "B"]) df.groupby(pd.cut(df['A'], np.arange(0, 101, 10)))['B'].sum() ``` ## DataFrames: harder problems ### These might require a bit of thinking outside the box... ...but all are solvable using just the usual pandas/NumPy methods (and so avoid using explicit `for` loops). Difficulty: *hard* **29.** Consider a DataFrame `df` where there is an integer column 'X': ```python df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]}) ``` For each value, count the difference back to the previous zero (or the start of the Series, whichever is closer). These values should therefore be ``` [1, 2, 0, 1, 2, 3, 4, 0, 1, 2] ``` Make this a new column 'Y'. 
```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})

izero = np.r_[-1, (df == 0).values.nonzero()[0]]  # indices of zeros
idx = np.arange(len(df))
y = df['X'] != 0
df['Y'] = idx - izero[np.searchsorted(izero - 1, idx) - 1]

# http://stackoverflow.com/questions/30730981/how-to-count-distance-to-the-previous-zero-in-pandas-series/
# credit: Behzad Nouri
```

Here's an alternative approach based on a [cookbook recipe](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#grouping):

```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})

x = (df['X'] != 0).cumsum()
y = x != x.shift()
df['Y'] = y.groupby((y != y.shift()).cumsum()).cumsum()
```

And another approach using a groupby operation:

```
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})

df['Y'] = df.groupby((df['X'] == 0).cumsum()).cumcount()

# We're off by one before we reach the first zero.
first_zero_idx = (df['X'] == 0).idxmax()
df['Y'].iloc[0:first_zero_idx] += 1
```

**30.** Consider the DataFrame constructed below which contains rows and columns of numerical data.

Create a list of the column-row index locations of the 3 largest values in this DataFrame. In this case, the answer should be:

```
[(5, 7), (6, 4), (2, 5)]
```

```
df = pd.DataFrame(np.random.RandomState(30).randint(1, 101, size=(8, 8)))

df.unstack().sort_values()[-3:].index.tolist()

# http://stackoverflow.com/questions/14941261/index-and-column-for-the-max-value-in-pandas-dataframe/
# credit: DSM
```

**31.** You are given the DataFrame below with a column of group IDs, 'grps', and a column of corresponding integer values, 'vals'.

```python
df = pd.DataFrame({"vals": np.random.RandomState(31).randint(-30, 30, size=15),
                   "grps": np.random.RandomState(31).choice(["A", "B"], 15)})
```

Create a new column 'patched_vals' which contains the same values as the 'vals' column, but with any negative values in 'vals' replaced by the group mean (the mean is taken over the group's non-negative values, as the expected output below shows):

```
    vals grps  patched_vals
0    -12    A          13.6
1     -7    B          28.0
2    -14    A          13.6
3      4    A           4.0
4     -7    A          13.6
5     28    B          28.0
6     -2    A          13.6
7     -1    A          13.6
8      8    A           8.0
9     -2    B          28.0
10    28    A          28.0
11    12    A          12.0
12    16    A          16.0
13   -24    A          13.6
14   -12    A          13.6
```

```
df = pd.DataFrame({"vals": np.random.RandomState(31).randint(-30, 30, size=15),
                   "grps": np.random.RandomState(31).choice(["A", "B"], 15)})

def replace(group):
    mask = group<0
    group[mask] = group[~mask].mean()
    return group

df.groupby(['grps'])['vals'].transform(replace)

# http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means/
# credit: unutbu
```

**32.** Implement a rolling mean over groups with window size 3, which ignores NaN values. For example consider the following DataFrame:

```python
>>> df = pd.DataFrame({'group': list('aabbabbbabab'),
                       'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})
>>> df
   group  value
0      a    1.0
1      a    2.0
2      b    3.0
3      b    NaN
4      a    2.0
5      b    3.0
6      b    NaN
7      b    1.0
8      a    7.0
9      b    3.0
10     a    NaN
11     b    8.0
```

The goal is to compute the Series:

```
0     1.000000
1     1.500000
2     3.000000
3     3.000000
4     1.666667
5     3.000000
6     3.000000
7     2.000000
8     3.666667
9     2.000000
10    4.500000
11    4.000000
```

E.g. the first window of size three for group 'b' has values 3.0, NaN and 3.0 and occurs at row index 5.
Instead of being NaN, the value in the new column at this row index should be 3.0 (just the two non-NaN values are used to compute the mean: (3+3)/2).

```
df = pd.DataFrame({'group': list('aabbabbbabab'),
                   'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})

g1 = df.groupby(['group'])['value']              # group values
g2 = df.fillna(0).groupby(['group'])['value']    # fillna, then group values

s = g2.rolling(3, min_periods=1).sum() / g1.rolling(3, min_periods=1).count() # compute means

s.reset_index(level=0, drop=True).sort_index()  # drop/sort index

# http://stackoverflow.com/questions/36988123/pandas-groupby-and-rolling-apply-ignoring-nans/
```

## Series and DatetimeIndex

### Exercises for creating and manipulating Series with datetime data

Difficulty: *easy/medium*

pandas is fantastic for working with dates and times. These puzzles explore some of this functionality.

**33.** Create a DatetimeIndex that contains each business day of 2015 and use it to index a Series of random numbers. Let's call this Series `s`.

```
dti = pd.date_range(start='2015-01-01', end='2015-12-31', freq='B')
s = pd.Series(np.random.rand(len(dti)), index=dti)
s
```

**34.** Find the sum of the values in `s` for every Wednesday.

```
s[s.index.weekday == 2].sum()
```

**35.** For each calendar month in `s`, find the mean of values.

```
s.resample('M').mean()
```

**36.** For each group of four consecutive calendar months in `s`, find the date on which the highest value occurred.

```
s.groupby(pd.Grouper(freq='4M')).idxmax()
```

**37.** Create a DateTimeIndex consisting of the third Thursday in each month for the years 2015 and 2016.

```
pd.date_range('2015-01-01', '2016-12-31', freq='WOM-3THU')
```

## Cleaning Data

### Making a DataFrame easier to work with

Difficulty: *easy/medium*

It happens all the time: someone gives you data containing malformed strings, Python lists and missing data. How do you tidy it up so you can get on with the analysis?

Take this monstrosity as the DataFrame to use in the following puzzles:

```python
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
                               'Budapest_PaRis', 'Brussels_londOn'],
              'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
              'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
                   'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
                               '12. Air France', '"Swiss Air"']})
```

Formatted, it looks like this:

```
             From_To  FlightNumber  RecentDelays              Airline
0       LoNDon_paris       10045.0      [23, 47]               KLM(!)
1       MAdrid_miLAN           NaN            []    <Air France> (12)
2   londON_StockhOlm       10065.0  [24, 43, 87]  (British Airways. )
3     Budapest_PaRis           NaN          [13]       12. Air France
4    Brussels_londOn       10085.0      [67, 32]          "Swiss Air"
```

(It's some flight data I made up; it's not meant to be accurate in any way.)

**38.** Some values in the **FlightNumber** column are missing (they are `NaN`). These numbers are meant to increase by 10 with each row so 10055 and 10075 need to be put in place. Modify `df` to fill in these missing numbers and make the column an integer column (instead of a float column).

```
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
                               'Budapest_PaRis', 'Brussels_londOn'],
              'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
              'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
                   'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
                               '12. Air France', '"Swiss Air"']})

df['FlightNumber'] = df['FlightNumber'].interpolate().astype(int)
df
```

**39.** The **From\_To** column would be better as two separate columns!
Split each string on the underscore delimiter `_` to give a new temporary DataFrame called 'temp' with the correct values. Assign the correct column names 'From' and 'To' to this temporary DataFrame.

```
temp = df.From_To.str.split('_', expand=True)
temp.columns = ['From', 'To']
temp
```

**40.** Notice how the capitalisation of the city names is all mixed up in this temporary DataFrame 'temp'. Standardise the strings so that only the first letter is uppercase (e.g. "londON" should become "London".)

```
temp['From'] = temp['From'].str.capitalize()
temp['To'] = temp['To'].str.capitalize()
temp
```

**41.** Delete the **From_To** column from `df` and attach the temporary DataFrame 'temp' from the previous questions.

```
df = df.drop('From_To', axis=1)
df = df.join(temp)
df
```

**42**. In the **Airline** column, you can see some extra punctuation and symbols have appeared around the airline names. Pull out just the airline name. E.g. `'(British Airways. )'` should become `'British Airways'`.

```
df['Airline'] = df['Airline'].str.extract(r'([a-zA-Z\s]+)', expand=False).str.strip()
# note: using .strip() gets rid of any leading/trailing spaces
df
```

**43**. In the **RecentDelays** column, the values have been entered into the DataFrame as a list. We would like each first value in its own column, each second value in its own column, and so on. If there isn't an Nth value, the value should be NaN.

Expand the Series of lists into a new DataFrame named 'delays', rename the columns 'delay_1', 'delay_2', etc. and replace the unwanted RecentDelays column in `df` with 'delays'.

```
# there are several ways to do this, but the following approach is possibly the simplest

delays = df['RecentDelays'].apply(pd.Series)

delays.columns = ['delay_{}'.format(n) for n in range(1, len(delays.columns)+1)]

df = df.drop('RecentDelays', axis=1).join(delays)

df
```

The DataFrame should look much better now:

```
   FlightNumber          Airline      From         To  delay_1  delay_2  delay_3
0         10045              KLM    London      Paris     23.0     47.0      NaN
1         10055       Air France    Madrid      Milan      NaN      NaN      NaN
2         10065  British Airways    London  Stockholm     24.0     43.0     87.0
3         10075       Air France  Budapest      Paris     13.0      NaN      NaN
4         10085        Swiss Air  Brussels     London     67.0     32.0      NaN
```

## Using MultiIndexes

### Go beyond flat DataFrames with additional index levels

Difficulty: *medium*

Previous exercises have seen us analysing data from DataFrames equipped with a single index level. However, pandas also gives you the possibility of indexing your data using *multiple* levels. This is very much like adding new dimensions to a Series or a DataFrame. For example, a Series is 1D, but by using a MultiIndex with 2 levels we gain much of the same functionality as a 2D DataFrame.

The set of puzzles below explores how you might use multiple index levels to enhance data analysis.

To warm up, we'll make a Series with two index levels.

**44**. Given the lists `letters = ['A', 'B', 'C']` and `numbers = list(range(10))`, construct a MultiIndex object from the product of the two lists. Use it to index a Series of random numbers. Call this Series `s`.

```
letters = ['A', 'B', 'C']
numbers = list(range(10))

mi = pd.MultiIndex.from_product([letters, numbers])
s = pd.Series(np.random.rand(30), index=mi)
s
```

**45.** Check the index of `s` is lexicographically sorted (this is a necessary property for indexing to work correctly with a MultiIndex).

```
s.index.is_lexsorted()

# or more verbosely...
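# (aside: in more recent pandas releases, where Index.is_lexsorted() is deprecated,
#  s.index.is_monotonic_increasing is the usual replacement check; version availability
#  is an assumption here, this notebook does not pin a pandas version)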
s.index.lexsort_depth == s.index.nlevels
```

**46**. Select the labels `1`, `3` and `6` from the second level of the MultiIndexed Series.

```
s.loc[:, [1, 3, 6]]
```

**47**. Slice the Series `s`; slice up to label 'B' for the first level and from label 5 onwards for the second level.

```
s.loc[pd.IndexSlice[:'B', 5:]]

# or equivalently without IndexSlice...
s.loc[slice(None, 'B'), slice(5, None)]
```

**48**. Sum the values in `s` for each label in the first level (you should have a Series giving you a total for labels A, B and C).

```
s.sum(level=0)
```

**49**. Suppose that `sum()` (and other methods) did not accept a `level` keyword argument. How else could you perform the equivalent of `s.sum(level=1)`?

```
# One way is to use .unstack()...
# This method should convince you that s is essentially just a regular DataFrame in disguise!

s.unstack().sum(axis=0)
```

**50**. Exchange the levels of the MultiIndex so we have an index of the form (letters, numbers). Is this new Series properly lexsorted? If not, sort it.

```
new_s = s.swaplevel(0, 1)

if not new_s.index.is_lexsorted():
    new_s = new_s.sort_index()

new_s
```

## Minesweeper

### Generate the numbers for safe squares in a Minesweeper grid

Difficulty: *medium* to *hard*

If you've ever used an older version of Windows, there's a good chance you've played with Minesweeper:
- https://en.wikipedia.org/wiki/Minesweeper_(video_game)

If you're not familiar with the game, imagine a grid of squares: some of these squares conceal a mine. If you click on a mine, you lose instantly. If you click on a safe square, you reveal a number telling you how many mines are found in the squares that are immediately adjacent. The aim of the game is to uncover all squares in the grid that do not contain a mine.

In this section, we'll make a DataFrame that contains the necessary data for a game of Minesweeper: coordinates of the squares, whether the square contains a mine and the number of mines found on adjacent squares.

**51**. Let's suppose we're playing Minesweeper on a 5 by 4 grid, i.e.

```
X = 5
Y = 4
```

To begin, generate a DataFrame `df` with two columns, `'x'` and `'y'` containing every coordinate for this grid. That is, the DataFrame should start:

```
   x  y
0  0  0
1  0  1
2  0  2
...
```

```
X = 5
Y = 4

p = pd.core.reshape.util.cartesian_product([np.arange(X), np.arange(Y)])
df = pd.DataFrame(np.asarray(p).T, columns=['x', 'y'])
df
```

**52**. For this DataFrame `df`, create a new column of zeros (safe) and ones (mine). The probability of a mine occurring at each location should be 0.4.

```
# One way is to draw samples from a binomial distribution.

df['mine'] = np.random.binomial(1, 0.4, X*Y)
df
```

**53**. Now create a new column for this DataFrame called `'adjacent'`. This column should contain the number of mines found on adjacent squares in the grid.

(E.g. for the first row, which is the entry for the coordinate `(0, 0)`, count how many mines are found on the coordinates `(0, 1)`, `(1, 0)` and `(1, 1)`.)

```
# Here is one way to solve using merges.
# It's not necessarily the optimal way, just
# the solution I thought of first...
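# Each merge below shifts a copy of the grid by one of the eight neighbour offsets
# (the trailing 0 leaves the 'mine' column untouched) and joins that neighbour's
# 'mine' indicator back onto every square. Off-grid neighbours have no match in the
# left join, so they come through as NaN and are skipped by the final sum(axis=1),
# leaving the count of adjacent mines.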
df['adjacent'] = \
    df.merge(df + [ 1,  1, 0], on=['x', 'y'], how='left')\
      .merge(df + [ 1, -1, 0], on=['x', 'y'], how='left')\
      .merge(df + [-1,  1, 0], on=['x', 'y'], how='left')\
      .merge(df + [-1, -1, 0], on=['x', 'y'], how='left')\
      .merge(df + [ 1,  0, 0], on=['x', 'y'], how='left')\
      .merge(df + [-1,  0, 0], on=['x', 'y'], how='left')\
      .merge(df + [ 0,  1, 0], on=['x', 'y'], how='left')\
      .merge(df + [ 0, -1, 0], on=['x', 'y'], how='left')\
      .iloc[:, 3:]\
      .sum(axis=1)

# An alternative solution is to pivot the DataFrame
# to form the "actual" grid of mines and use convolution.
# See https://github.com/jakevdp/matplotlib_pydata2013/blob/master/examples/minesweeper.py

from scipy.signal import convolve2d

mine_grid = df.pivot_table(columns='x', index='y', values='mine')
counts = convolve2d(mine_grid.astype(complex), np.ones((3, 3)), mode='same').real.astype(int)
df['adjacent'] = (counts - mine_grid).values.ravel('F')  # flatten column-major to match df's row order
```

**54**. For rows of the DataFrame that contain a mine, set the value in the `'adjacent'` column to NaN.

```
df.loc[df['mine'] == 1, 'adjacent'] = np.nan
```

**55**. Finally, convert the DataFrame to a grid of the adjacent mine counts: columns are the `x` coordinate, rows are the `y` coordinate.

```
df.drop('mine', axis=1).set_index(['y', 'x']).unstack()
```

## Plotting

### Visualize trends and patterns in data

Difficulty: *medium*

To really get a good understanding of the data contained in your DataFrame, it is often essential to create plots: if you're lucky, trends and anomalies will jump right out at you. This functionality is baked into pandas and the puzzles below explore some of what's possible with the library.

**56.** Pandas is highly integrated with the plotting library matplotlib, and makes plotting DataFrames very user-friendly! Plotting in a notebook environment usually makes use of the following boilerplate:

```python
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
```

matplotlib is the plotting library which pandas' plotting functionality is built upon, and it is usually aliased to ```plt```.

```%matplotlib inline``` tells the notebook to show plots inline, instead of creating them in a separate window.

```plt.style.use('ggplot')``` is a style theme that most people find agreeable, based upon the styling of R's ggplot package.

For starters, make a scatter plot of this random data, but use black X's instead of the default markers.

```df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})```

Consult the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html) if you get stuck!

```
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')

df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})

df.plot.scatter("xs", "ys", color = "black", marker = "x")
```

**57.** Columns in your DataFrame can also be used to modify colors and sizes. Bill has been keeping track of his performance at work over time, as well as how good he was feeling that day, and whether he had a cup of coffee in the morning. Make a plot which incorporates all four features of this DataFrame.
(Hint: If you're having trouble seeing the plot, try multiplying the Series which you choose to represent size by 10 or more)

*The chart doesn't have to be pretty: this isn't a course in data viz!*

```
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
                   "hours_in"    :[1,9,6,5,3,9,2,9,1,7,4,2,2],
                   "happiness"   :[2,1,3,2,3,1,2,3,1,2,2,1,3],
                   "caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
```

```
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
                   "hours_in"    :[1,9,6,5,3,9,2,9,1,7,4,2,2],
                   "happiness"   :[2,1,3,2,3,1,2,3,1,2,2,1,3],
                   "caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})

df.plot.scatter("hours_in", "productivity", s = df.happiness * 30, c = df.caffienated)
```

**58.** What if we want to plot multiple things? Pandas allows you to pass in a matplotlib *Axis* object for plots, and plots will also return an Axis object.

Make a bar plot of monthly revenue with a line plot of monthly advertising spending (numbers in millions).

```
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
                   "advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
                   "month":range(12)
                  })
```

```
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
                   "advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
                   "month":range(12)
                  })

ax = df.plot.bar("month", "revenue", color = "green")
df.plot.line("month", "advertising", secondary_y = True, ax = ax)
ax.set_xlim((-1,12))
```

Now we're finally ready to create a candlestick chart, which is a very common tool used to analyze stock price data. A candlestick chart shows the opening, closing, highest, and lowest price for a stock during a time window. The color of the "candle" (the thick part of the bar) is green if the stock closed above its opening price, or red if below.

![Candlestick Example](img/candle.jpg)

This was initially designed to be a pandas plotting challenge, but it just so happens that this type of plot is just not feasible using pandas' methods. If you are unfamiliar with matplotlib, we have provided a function that will plot the chart for you so long as you can use pandas to get the data into the correct format.

Your first step should be to get the data in the correct format using pandas' time-series grouping function. We would like each candle to represent an hour's worth of data. You can write your own aggregation function which returns the open/high/low/close, but pandas has a built-in which also does this.

The below cell contains helper functions. Call ```day_stock_data()``` to generate a DataFrame containing the prices a hypothetical stock sold for, and the time the sale occurred. Call ```plot_candlestick(df)``` on your properly aggregated and formatted stock data to print the candlestick chart.
```
#This function is designed to create semi-interesting random stock price data

import numpy as np

def float_to_time(x):
    return str(int(x)) + ":" + str(int(x%1 * 60)).zfill(2) + ":" + str(int(x*60 % 1 * 60)).zfill(2)

def day_stock_data():
    #NYSE is open from 9:30 to 4:00
    time = 9.5
    price = 100
    results = [(float_to_time(time), price)]
    while time < 16:
        elapsed = np.random.exponential(.001)
        time += elapsed
        if time > 16:
            break
        price_diff = np.random.uniform(.999, 1.001)
        price *= price_diff
        results.append((float_to_time(time), price))

    df = pd.DataFrame(results, columns = ['time','price'])
    df.time = pd.to_datetime(df.time)
    return df

def plot_candlestick(agg):
    fig, ax = plt.subplots()
    for time in agg.index:
        ax.plot([time.hour] * 2, agg.loc[time, ["high","low"]].values, color = "black")
        ax.plot([time.hour] * 2, agg.loc[time, ["open","close"]].values, color = agg.loc[time, "color"], linewidth = 10)

    ax.set_xlim((8,16))
    ax.set_ylabel("Price")
    ax.set_xlabel("Hour")
    ax.set_title("OHLC of Stock Value During Trading Day")
    plt.show()
```

**59.** Generate a day's worth of random stock data, and aggregate / reformat it so that it has hourly summaries of the opening, highest, lowest, and closing prices.

```
df = day_stock_data()
df.head()

df.set_index("time", inplace = True)
agg = df.resample("H").ohlc()
agg.columns = agg.columns.droplevel()
agg["color"] = (agg.close > agg.open).map({True:"green",False:"red"})
agg.head()
```

**60.** Now that you have your properly-formatted data, try to plot it yourself as a candlestick chart. Use the ```plot_candlestick(df)``` function above, or matplotlib's [```plot``` documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.plot.html) if you get stuck.

```
plot_candlestick(agg)
```

*More exercises to follow soon...*
Load modules

```
import pickle
import tensorflow as tf
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
import keras

# flags = tf.app.flags
# FLAGS = flags.FLAGS

# # command line flags
# flags.DEFINE_string('training_file', '', "Bottleneck features training file (.p)")
# flags.DEFINE_string('validation_file', '', "Bottleneck features validation file (.p)")

def load_bottleneck_data(training_file, validation_file):
    """
    Utility function to load bottleneck features.

    Arguments:
        training_file - String
        validation_file - String
    """
    print("Training file", training_file)
    print("Validation file", validation_file)

    with open(training_file, 'rb') as f:
        train_data = pickle.load(f)
    with open(validation_file, 'rb') as f:
        validation_data = pickle.load(f)

    X_train = train_data['features']
    y_train = train_data['labels']
    X_val = validation_data['features']
    y_val = validation_data['labels']

    return X_train, y_train, X_val, y_val

def main(data_set, training_file, validation_file, epochs, learning_rate):
    # load bottleneck data
    X_train, y_train, X_val, y_val = load_bottleneck_data(training_file, validation_file)

    if data_set=='cifar10':
        n_labels = 10
    elif data_set=='traffic':
        n_labels = 43

    # flatten the bottleneck features to (N, C); assumes the middle dimensions are 1
    x_train = X_train.reshape((X_train.shape[0], X_train.shape[-1]))
    x_val = X_val.reshape((X_val.shape[0], X_val.shape[-1]))
    print(x_train.shape)

    # a single softmax layer on top of the frozen bottleneck features
    model = Sequential()
    model.add(Dense(n_labels, input_shape=(x_train.shape[1],), activation='softmax'))

    # pass the configured optimizer instance so learning_rate actually takes effect
    adam = keras.optimizers.Adam(lr=learning_rate)
    model.compile(adam, 'sparse_categorical_crossentropy', ['accuracy'])
    history = model.fit(x_train, y_train, nb_epoch=epochs)

    scores = model.evaluate(x_val, y_val)
    return scores

# training_file = 'from-link/vgg_traffic_100_bottleneck_features_train.p'
# validation_file = 'from-link/vgg_traffic_bottleneck_features_validation.p'

# # load bottleneck data
# X_train, y_train, X_val, y_val = load_bottleneck_data(training_file, validation_file)

# print(X_train.shape, y_train.shape)
# print(X_val.shape, y_val.shape)

# # TODO: define your model and hyperparams here
# # make sure to adjust the number of classes based on
# # the dataset
# # 10 for cifar10
# # 43 for traffic

# n_labels_cifar10 = 10
# n_labels_traffic = 43

# # n_bottleneck = X_train.shape
# x_train = X_train.reshape((X_train.shape[0], X_train.shape[-1]))
# x_val = X_val.reshape((X_val.shape[0], X_val.shape[-1]))
# print(x_train.shape, x_val.shape)

# model = Sequential()
# model.add(Dense(n_labels_traffic, input_shape=(x_train.shape[1],), activation='softmax'))
# model.compile('adam', 'sparse_categorical_crossentropy', ['accuracy'])
# history = model.fit(x_train, y_train, nb_epoch=10, validation_split=0.1)

# # TODO: train your model here

folder_name = 'from-git'
model_name = 'vgg'
data_name = 'cifar10'

# build the pickle paths with positional placeholders
training_file = '{}/{}_{}_bottleneck_features_train.p'.format(folder_name, model_name, data_name)
validation_file = '{}/{}_{}_bottleneck_features_validation.p'.format(folder_name, model_name, data_name)

scores = main(data_name, training_file, validation_file, 50, 0.0001)
print()
print(scores)

scores
```
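The bottleneck feature files loaded above are pre-computed and simply read from disk. As a rough, hypothetical sketch of how such files *could* be produced (this is an illustration, not the pipeline actually used for the `from-git` pickles; it assumes a Keras build that ships `keras.applications` and images already resized to VGG's 224x224 input), one can run the images through a headless VGG16 and pickle the outputs under the `'features'`/`'labels'` keys that `load_bottleneck_data` expects:

```
# Hypothetical sketch only: produce bottleneck-feature pickles compatible with
# load_bottleneck_data(). Assumes `images` is a (N, 224, 224, 3) uint8 array and
# `labels` a (N,) array; the exact shape/pooling of the provided files is unknown.
import pickle
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input

def save_bottleneck_features(images, labels, out_file):
    base = VGG16(weights='imagenet', include_top=False)  # headless VGG16
    feats = base.predict(preprocess_input(images.astype(np.float32)), batch_size=32)
    with open(out_file, 'wb') as f:
        pickle.dump({'features': feats, 'labels': labels}, f)

# e.g. save_bottleneck_features(train_images, train_labels,
#                               'from-git/vgg_cifar10_bottleneck_features_train.p')
```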
```
import sys
sys.path.append("../")
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision

import matplotlib.pyplot as plt
%matplotlib inline

from PIL import Image

import transforms as ext_transforms
from models.enet import ENet
from metric.iou import IoU
from args import get_arguments
from data.utils import enet_weighing, median_freq_balancing
import utils

import glob
import cv2
import numpy as np  # used by random_colour_masks below
import transforms as ext_transforms
from torchvision.transforms import Compose, Resize, ToPILImage, ToTensor
import random

cityscapes_color_encoding = OrderedDict([
    ('unlabeled', (0, 0, 0)),
    ('road', (128, 64, 128)),
    ('sidewalk', (244, 35, 232)),
    ('building', (70, 70, 70)),
    ('wall', (102, 102, 156)),
    ('fence', (190, 153, 153)),
    ('pole', (153, 153, 153)),
    ('traffic_light', (250, 170, 30)),
    ('traffic_sign', (220, 220, 0)),
    ('vegetation', (107, 142, 35)),
    ('terrain', (152, 251, 152)),
    ('sky', (70, 130, 180)),
    ('person', (220, 20, 60)),
    ('rider', (255, 0, 0)),
    ('car', (0, 0, 142)),
    ('truck', (0, 0, 70)),
    ('bus', (0, 60, 100)),
    ('train', (0, 80, 100)),
    ('motorcycle', (0, 0, 230)),
    ('bicycle', (119, 11, 32))
])

camvid_color_encoding = OrderedDict([
    ('sky', (128, 128, 128)),
    ('building', (128, 0, 0)),
    ('pole', (192, 192, 128)),
    ('road_marking', (255, 69, 0)),
    ('road', (128, 64, 128)),
    ('pavement', (60, 40, 222)),
    ('tree', (128, 128, 0)),
    ('sign_symbol', (192, 128, 128)),
    ('fence', (64, 64, 128)),
    ('car', (64, 0, 128)),
    ('pedestrian', (64, 64, 0)),
    ('bicyclist', (0, 128, 192)),
    ('unlabeled', (0, 0, 0))
])

# keep each model and its inputs together on its own device
device_obj = torch.device('cuda:6') if torch.cuda.is_available() else torch.device('cpu')
model_obj = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model_obj.eval()
model_obj.to(device_obj)

device_enet = torch.device('cuda:7') if torch.cuda.is_available() else torch.device('cpu')
model_enet = ENet(12).to(device_enet) #19 for Cityscapes 11 for Camvid
optimizer = optim.Adam(model_enet.parameters())
model_enet = utils.load_checkpoint(model_enet, optimizer, '../save/ENet_CamVid/', 'ENet')[0]
image_transform = transforms.Compose(
    [transforms.Resize((480, 640)),
     transforms.ToTensor()])
model_enet.eval()

COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

transformations = Compose([ToPILImage(), ToTensor()])

def get_prediction(image, threshold):
    # i = cv2.imread(img_path)
    img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img = transformations(img).unsqueeze(0).to(device_obj)
    pred = model_obj(img)
    pred_score = list(pred[0]['scores'].detach().cpu().numpy())
    pred_t = [pred_score.index(x) for x in pred_score if x > threshold]
    if len(pred_t) > 0:
        pred_t = pred_t[-1]
        masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
        pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].cpu().numpy())]
        pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())]
        masks = masks[:pred_t + 1]
        pred_boxes = pred_boxes[:pred_t + 1]
        pred_class = pred_class[:pred_t + 1]
        return masks, pred_boxes, pred_class
    else:
        return [], [], []

def get_frame_prediction(frame, threshold):
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = transformations(img).unsqueeze(0).to(device_obj)
    pred = model_obj(img)
    # move results back to the CPU before converting to numpy
    pred_score = list(pred[0]['scores'].detach().cpu().numpy())
    pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
    masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
    pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].cpu().numpy())]
    pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())]
    masks = masks[:pred_t + 1]
    pred_boxes = pred_boxes[:pred_t + 1]
    pred_class = pred_class[:pred_t + 1]
    return masks, pred_boxes, pred_class

def random_colour_masks(image):
    colours = [[0, 255, 0], [0, 0, 255], [255, 0, 0], [0, 255, 255], [255, 255, 0],
               [255, 0, 255], [80, 70, 180], [250, 80, 190], [245, 145, 50],
               [70, 150, 250], [50, 190, 190]]
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    r[image == 1], g[image == 1], b[image == 1] = colours[random.randrange(0, 10)]
    coloured_mask = np.stack([r, g, b], axis=2)
    return coloured_mask

def instance_segmentation_api(img_path, threshold=0.5, rect_th=3, text_size=0.1, text_th=3):
    img = cv2.imread(img_path)
    masks, boxes, pred_cls = get_prediction(img, threshold)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for i in range(len(masks)):
        rgb_mask = random_colour_masks(masks[i])
        img = cv2.addWeighted(img, 1, rgb_mask, 0.5, 0)
        cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th)
        cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th)
    plt.figure(figsize=(20, 30))
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()

def predict_enet(image, image_transform, model):
    # resize/convert the PIL image, add a batch dimension and move it to the ENet device
    img = image_transform(image).unsqueeze(0).to(device_enet)
    pred = model(img)
    _, predictions = torch.max(pred.data, 1)
    label_to_rgb = transforms.Compose([
        ext_transforms.LongTensorToRGBPIL(camvid_color_encoding),
        transforms.ToTensor()
    ])
    # return the per-pixel class-index map and a transform that colour-encodes it
    return predictions.cpu(), label_to_rgb
```
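A minimal usage sketch for the helpers above (illustrative only: `test.png` is a placeholder path, not a file referenced by this notebook):

```
# Hypothetical usage; 'test.png' is a placeholder image path.
img_path = 'test.png'

# Mask R-CNN instance segmentation overlay
instance_segmentation_api(img_path, threshold=0.7)

# ENet semantic segmentation: per-pixel class indices plus a colouriser transform
pil_img = Image.open(img_path).convert('RGB')
class_map, label_to_rgb = predict_enet(pil_img, image_transform, model_enet)
print(class_map.shape)  # torch.Size([1, 480, 640]) given the Resize((480, 640)) above
```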
``` import numpy as np import pandas as pd import plotly.graph_objects as go import ipywidgets as widgets from IPython.display import display np.seterr(divide = 'ignore') df = pd.read_csv('player_v_leaguedf.csv') output_team_table = widgets.Output() def get_team_data(team_name): output_team_table.clear_output() with output_team_table: team_df = df.query(f'team_name == "{team_name.new}" and player_avg > 0') fig = go.Figure(data=[go.Table( header=dict(values=[team_df.columns[0],team_df.columns[1],team_df.columns[2],team_df.columns[4],team_df.columns[5],team_df.columns[6],team_df.columns[8],team_df.columns[7],team_df.columns[10]], fill_color='paleturquoise', align='left'), cells=dict(values=[team_df.x, team_df.y, team_df.xy, team_df.first_name, team_df.last_name, team_df.team_name, team_df.goals, team_df.shots, team_df.player_avg], fill_color='lavender', align='left')) ]) fig.update_layout(title= f'{team_name.new} Player Shot Averages > 0') display(go.FigureWidget(fig)) displayTeamShotPlot(df,team_name.new) player_vs_league(df,team_name.new) output_teamPlot = widgets.Output() def displayTeamShotPlot(df,team_name): output_teamPlot.clear_output() with output_teamPlot: fig = go.FigureWidget(data=[go.Histogram2d(x=df.query(f'team_name == "{team_name}" and goals != 0')['x'].abs(), y=df.query(f'team_name == "{team_name}" and goals != 0')['y'], colorbar={'title':{ 'text': 'Counts' } }, colorscale='YlGnBu', hovertemplate= '<b>X</b>: %{x}<br><extra></extra>'+ '<b>Y</b>: %{y}<br>'+ '<b>Goals</b>: %{z}<br>' )], layout= { 'title': {'text':f'Density Plot of {team_name} Goal Shot Locations, 2018-2019 Season'}, 'legend':{'title':{'text':'Count','side':'top'}}, 'xaxis_title_text':'X Coordiante', 'yaxis_title_text':'Y Coordiante', 'template': 'plotly_white', 'xaxis':{ 'type':'linear', 'range':[-10,99], 'autorange': True, 'showgrid': False, 'zeroline': False }, 'yaxis': { 'type': 'linear', 'range':[-43,43], 'autorange': False, 'showgrid': False, 'zeroline': False }, 'shapes': [ { 'x0': '0', 'x1':'0', 'y0': '42.5', 'y1': '-42.5', 'line': {'color':'red','width': 2, 'dash': 'dash'}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '-1', 'x1': '89', 'y0': '42.5', 'y1': '42.5', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '-1', 'x1': '89', 'y0': '-42.5', 'y1': '-42.5', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '100', 'x1': '100', 'y0': '22', 'y1': '-22', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 2}, 'path': 'M 89 42.5 C 99 42.5, 99 32, 100 22', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 2}, 'path': 'M 89 -42.5 C 99 -42.5, 99 -32, 100 -22', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'x0':'89', 'x1':'89', 'y0':'42.5', 'y1':'-42.5', 'line':{'color':'red','width':2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0':'25', 'x1':'25', 'y0':'42.5', 'y1':'-42.5', 'line':{'color':'blue','width':2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 1}, 'path': 'M 85 4 C 83 4, 83 -4, 85 -4', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1':'85', 'y0': '4', 'y1': '4', 'line': {'width': 1}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1':'85', 'y0': '-4', 'y1': '-4', 'line': {'width': 1}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1': '91', 'y0': '2', 'y1': '-2', 'line': {'width': 1}, 'layer': 
'above', 'type': 'rect', 'xref': 'x', 'yref': 'y' } ], 'annotations': [ { 'x': 92.5, 'y': 0, 'text': 'Goal', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 90.5, 'y': 37, 'text': 'Goal Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 26.5, 'y': 37, 'text': 'Blue Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 1.5, 'y': 34, 'text': 'Center Red Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False } ] }) display(fig) output_teamvsleague = widgets.Output() def player_vs_league(df,team_name): output_teamvsleague.clear_output() with output_teamvsleague: fig = go.FigureWidget(data=[go.Scattergl(x=df.query(f'team_name == "{team_name}" and player_avg > league_avg and player_avg != 1')['x'].abs(),y=df.query(f'team_name == "{team_name}"')['y'], marker={'size':df.query(f'team_name == "{team_name}" and player_avg > league_avg and player_avg != 1')['player_avg'],'sizemin':1,'sizeref':.05}, mode='markers', text= df.query(f'team_name == "{team_name}" and player_avg > league_avg and player_avg != 1')['league_avg'], hovertemplate= '<b>X</b>: %{x}<br><extra></extra>'+ '<b>Y</b>: %{y}<br>'+ '<b>Player Average</b>: %{marker.size}<br>'+ '<b>League Average</b>: %{text}' )], layout= { 'title': {'text':f'{team_name} Scoring Average > League Scoring Average <br> For Given Shot Location 2018-2019 Season'}, 'template': 'plotly_white', 'width': 800, 'height': 700, 'xaxis':{ 'type':'linear', 'range':[-10,99], 'autorange': True, 'showgrid': False, 'zeroline': False }, 'yaxis': { 'type': 'linear', 'range':[-43,43], 'autorange': False, 'showgrid': False, 'zeroline': False }, 'shapes': [ { 'x0': '0', 'x1':'0', 'y0': '42.5', 'y1': '-42.5', 'line': {'color':'red','width': 2, 'dash': 'dash'}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '-1', 'x1': '89', 'y0': '42.5', 'y1': '42.5', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '-1', 'x1': '89', 'y0': '-42.5', 'y1': '-42.5', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '100', 'x1': '100', 'y0': '22', 'y1': '-22', 'line': {'width': 2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 2}, 'path': 'M 89 42.5 C 99 42.5, 99 32, 100 22', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 2}, 'path': 'M 89 -42.5 C 99 -42.5, 99 -32, 100 -22', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'x0':'89', 'x1':'89', 'y0':'42.5', 'y1':'-42.5', 'line':{'color':'red','width':2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0':'25', 'x1':'25', 'y0':'42.5', 'y1':'-42.5', 'line':{'color':'blue','width':2}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'line': {'width': 1}, 'path': 'M 85 4 C 83 4, 83 -4, 85 -4', 'type': 'path', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1':'85', 'y0': '4', 'y1': '4', 'line': {'width': 1}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1':'85', 'y0': '-4', 'y1': '-4', 'line': {'width': 1}, 'layer': 'above', 'type': 'line', 'xref': 'x', 'yref': 'y' }, { 'x0': '89', 'x1': '91', 'y0': '2', 'y1': '-2', 'line': {'width': 1}, 'layer': 'above', 'type': 'rect', 'xref': 'x', 'yref': 'y' } ], 'annotations': [ { 'x': 94, 'y': 0, 'text': 'Goal', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 91.5, 'y': 30, 'text': 'Goal Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 27.5, 'y': 30, 'text': 
'Blue Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False }, { 'x': 2.5, 'y': 24, 'text': 'Center Red Line', 'textangle': 90, 'xref': 'x', 'yref': 'y', 'showarrow': False } ] }) display(fig) teams = (sorted(list(df['team_name'].unique()))) teams.insert(0,'Select A Team') dropdown_team = widgets.Dropdown(options=teams) dropdown_team.observe(get_team_data,names='value') graph_widgets = widgets.HBox([output_teamPlot,output_teamvsleague]) tab = widgets.Tab([output_team_table, graph_widgets]) tab.set_title(0, f'Team Table') tab.set_title(1, f'Team Graphs') dashboard = widgets.VBox([dropdown_team, tab]) display(dashboard) ```
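The dashboard cell above leans on an `ipywidgets` detail that is easy to miss: the callback registered via `dropdown_team.observe(get_team_data, names='value')` does not receive the selected string itself but a change object, which is why the code reads `team_name.new`. Below is a minimal, standalone sketch of that observer pattern (the widget and option names here are illustrative, not taken from the dashboard above):

```
import ipywidgets as widgets
from IPython.display import display

out = widgets.Output()  # hypothetical output area, just for demonstration

def on_team_change(change):
    # `change` is a dict-like object; change.new / change.old hold the new and previous values
    out.clear_output()
    with out:
        print(f"selected: {change.new} (was: {change.old})")

dropdown = widgets.Dropdown(options=['Select A Team', 'Team A', 'Team B'])
dropdown.observe(on_team_change, names='value')  # fire only when the 'value' trait changes
display(widgets.VBox([dropdown, out]))
```

Keeping each plot in its own `Output` widget, as the dashboard does, lets a single dropdown change clear and redraw several views independently.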
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn.linear_model as lm
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from datetime import datetime
today = datetime.now().strftime('%m%d%Y')

import sys
import importlib
sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev')
import snmcseq_utils
importlib.reload(snmcseq_utils)
import CEMBA_clst_utils
from __init__jupyterlab import *

np.random.seed(0)
```

## Note
- Train a linear model (RNA ~ promoter k-mers) for each cell type.
- Examine results: ~0.38 Spearman r2 per cell type; ~0.48 r2 on the mean across cell types.

## To update
- Calculate non-redundant k-mers.
- Cross validation.

# import data

```
output_fig = '/cndd2/fangming/projects/scf_enhancers/results/figures/promoter_{{}}_{}.pdf'.format(today)
output_fig

# get promoter k-mer counts
kmer_nums = [2, 3, 4, 5]
promoter_kmer_list = []
data_path_prom = '/cndd2/ethan/projects/enh_gene_linkage/enhancer_sequence/data/promoter_sort_kmer_{}_bases_1000.tsv'
for k in kmer_nums:
    prom = pd.read_csv(data_path_prom.format(k), sep='\t').set_index('0')
    promoter_kmer_list.append(prom)
promoter_kmers = pd.concat(promoter_kmer_list, axis=1)

f = '/cndd2/ethan/projects/enh_gene_linkage/enhancer_sequence/data/promoter_sort_center_1000.bed'
genes = pd.read_csv(f, sep='\t', header=None)
genes[3] = [i.split('.')[0] for i in genes[3]]
genes['kmer_format'] = '>' + genes[0] + ':' + genes[1].astype(str) + '-' + genes[2].astype(str)
promoter_kmers = promoter_kmers.loc[genes['kmer_format'].values]
promoter_kmers['gene'] = genes[3].values
kmers = promoter_kmers.set_index('gene')

expression_dir = '/cndd2/ethan/projects/enh_gene_linkage/data/enhancer_ethan38_200520/results/gene_counts_{}'
expression = pd.read_csv(expression_dir.format('10x_cells_v3_ethan38.tsv'), sep='\t').set_index('Unnamed: 0')
expression = expression.drop('Unnamed: 39', axis=1)
expression = snmcseq_utils.logcpm(expression)
expression = expression.reindex(kmers.index)
expression = expression.loc[expression.isna().sum(axis=1)==0]  # remove rows with NaN
expression.shape

kmers = kmers.loc[expression.index]
expression.shape, kmers.shape
expression.head()
kmers.shape
```

# set up model

```
X = kmers.values
y = expression.values
ngenes = len(y)

# 90/10 train/test split over genes
train = np.random.choice(np.arange(ngenes), round(ngenes*0.9), replace=False)
test = np.setdiff1d(np.arange(ngenes), train)
xtrain = X[train, :]
ytrain = y[train, :]
xtest = X[test, :]
ytest = y[test, :]
print(xtrain.shape, ytrain.shape)
print(xtest.shape, ytest.shape)
```

# Train model

```
model = lm.LinearRegression(normalize=True)
model = model.fit(xtrain, ytrain)  # a separate model for each cell type

trainhat = model.predict(xtrain)
testhat = model.predict(xtest)

r, p = spearmanr(trainhat.flatten(), ytrain.flatten())
r_test, p_test = spearmanr(testhat.flatten(), ytest.flatten())

results = [
    {
        'x': ytrain.flatten(),
        'y': trainhat.flatten(),
        'title': 'Training',
        'r': r,
    },
    {
        'x': ytest.flatten(),
        'y': testhat.flatten(),
        'title': 'Testing',
        'r': r_test,
    },
]

fig, axs = plt.subplots(1, 2, figsize=(5*2, 4))
for ax, result in zip(axs, results):
    z = snmcseq_utils.scatter_density(result['x'], result['y'], p=.0001)
    im = ax.scatter(result['x'], result['y'], c=z, s=1, cmap='magma', rasterized=True)
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.set_label('Density', rotation=270, labelpad=10)
    ax.set_title("{}, r2={:.2f}".format(result['title'], result['r']**2))
    ax.set_ylabel('predicted value')
    ax.set_xlabel('true value')
fig.suptitle('Gene expression (log10CPM+1)', fontsize=15)
fig.tight_layout()
snmcseq_utils.savefig(fig, output_fig.format('pred_vs_true'))
```

# Check per cluster

```
rtest = []
rtrain = []
for i in range(ytest.shape[1]):
    clust_testhat = testhat[:, i]
    clust_test = ytest[:, i]
    clust_trainhat = trainhat[:, i]
    clust_train = ytrain[:, i]
    rtest.append(spearmanr(clust_testhat, clust_test)[0])
    rtrain.append(spearmanr(clust_trainhat, clust_train)[0])

fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(np.arange(len(rtrain)), np.square(rtrain), 'o-', label='train')
ax.plot(np.arange(len(rtest)), np.square(rtest), 'o-', label='test')
ax.set_xticks(np.arange(len(rtest)))
ax.set_xticklabels(expression.columns, rotation=90)
ax.set_title('Per-cluster variance explained, linear regression')
ax.set_xlabel('cluster')
ax.set_ylabel('Spearman R2 value')
ax.legend(bbox_to_anchor=(1, 1))
snmcseq_utils.savefig(fig, output_fig.format('clster_lin_reg_rval'))
```

# Check against mean value in tissue

```
model_ = lm.LinearRegression(normalize=True)
yuse = np.mean(ytrain, axis=1)
yuse_test = np.mean(ytest, axis=1)
model_ = model_.fit(xtrain, yuse)

trainhat_ = model_.predict(xtrain)
testhat_ = model_.predict(xtest)

r_, p_ = spearmanr(trainhat_, yuse)
r_test_, p_test_ = spearmanr(testhat_, yuse_test)  # correlate test predictions with test targets

results = [
    {
        'x': yuse,
        'y': trainhat_,
        'title': 'Training',
        'r': r_,
    },
    {
        'x': yuse_test,
        'y': testhat_,
        'title': 'Testing',
        'r': r_test_,
    },
]

fig, axs = plt.subplots(1, 2, figsize=(5*2, 4))
for ax, result in zip(axs, results):
    z = snmcseq_utils.scatter_density(result['x'], result['y'], p=.01)
    im = ax.scatter(result['x'], result['y'], c=z, s=1, cmap='magma', rasterized=True)
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.set_label('Density', rotation=270, labelpad=10)
    ax.set_title("{}, r2={:.2f}".format(result['title'], result['r']**2))
    ax.set_ylabel('predicted value')
    ax.set_xlabel('true value')
fig.suptitle('Gene expression (log10CPM+1)', fontsize=15)
fig.tight_layout()
snmcseq_utils.savefig(fig, output_fig.format('mean_expresion_across_clusters_pred_vs_true'))
```
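The to-update list above mentions cross-validation. A minimal sketch of how the single 90/10 split could be replaced by K-fold cross-validation over genes, assuming `X` and `y` are the k-mer and expression matrices defined earlier and reusing `lm`, `spearmanr`, and `np` from the imports at the top of this notebook (the fold count and scoring choice here are illustrative):

```
from sklearn.model_selection import KFold

kf = KFold(n_splits=5, shuffle=True, random_state=0)
fold_r2 = []
for fold, (train_idx, test_idx) in enumerate(kf.split(X)):
    # one multi-output fit, i.e. one linear model per cell type
    # (the cells above use LinearRegression(normalize=True); `normalize` is deprecated in recent scikit-learn)
    cv_model = lm.LinearRegression()
    cv_model.fit(X[train_idx], y[train_idx])
    yhat = cv_model.predict(X[test_idx])
    r_cv, _ = spearmanr(yhat.flatten(), y[test_idx].flatten())
    fold_r2.append(r_cv**2)
    print("fold {}: Spearman r2 = {:.3f}".format(fold, r_cv**2))
print("mean r2 = {:.3f} +/- {:.3f}".format(np.mean(fold_r2), np.std(fold_r2)))
```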
# ETL Processes

Use this notebook to develop the ETL process for each of your tables before completing the `etl.py` file to load the whole datasets.

```
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *

conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()

def get_files(filepath):
    all_files = []
    for root, dirs, files in os.walk(filepath):
        files = glob.glob(os.path.join(root, '*.json'))
        for f in files:
            all_files.append(os.path.abspath(f))
    return all_files
```

# Process `song_data`

In this first part, you'll perform ETL on the first dataset, `song_data`, to create the `songs` and `artists` dimensional tables.

Let's perform ETL on a single song file and load a single record into each table to start.
- Use the `get_files` function provided above to get a list of all song JSON files in `data/song_data`
- Select the first song in this list
- Read the song file and view the data

```
song_files = get_files("./data/song_data")
filepath = song_files[0]
df = pd.DataFrame(pd.read_json(filepath, lines=True, orient='columns'))
df.head()
```

## #1: `songs` Table

#### Extract Data for Songs Table
- Select columns for song ID, title, artist ID, year, and duration
- Use `df.values` to select just the values from the dataframe
- Index to select the first (only) record in the dataframe
- Convert the array to a list and set it to `song_data`

```
# song ID, title, artist ID, year, and duration
song_data = (df.values[0][7], df.values[0][8], df.values[0][0], df.values[0][9], df.values[0][5])
song_data
```

#### Insert Record into Song Table

Implement the `song_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song into the `songs` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `songs` table in the sparkify database.

```
# NOTE: To prevent duplicates use "ON CONFLICT (song_id) DO NOTHING" in the SQL query.
cur.execute(song_table_insert, song_data)
conn.commit()
```

Run `test.ipynb` to see if you've successfully added a record to this table.

## #2: `artists` Table

#### Extract Data for Artists Table
- Select columns for artist ID, name, location, latitude, and longitude
- Use `df.values` to select just the values from the dataframe
- Index to select the first (only) record in the dataframe
- Convert the array to a list and set it to `artist_data`

```
# artist ID, name, location, latitude, and longitude
artist_data = (df.values[0][0], df.values[0][4], df.values[0][2], df.values[0][1], df.values[0][3])
artist_data
```

#### Insert Record into Artist Table

Implement the `artist_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song's artist into the `artists` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `artists` table in the sparkify database.

```
# NOTE: To prevent duplicates use "ON CONFLICT (artist_id) DO NOTHING" in the SQL query.
cur.execute(artist_table_insert, artist_data)
conn.commit()
```

Run `test.ipynb` to see if you've successfully added a record to this table.

# Process `log_data`

In this part, you'll perform ETL on the second dataset, `log_data`, to create the `time` and `users` dimensional tables, as well as the `songplays` fact table.

Let's perform ETL on a single log file and load a single record into each table.
- Use the `get_files` function provided above to get a list of all log JSON files in `data/log_data`
- Select the first log file in this list
- Read the log file and view the data

```
log_files = get_files("./data/log_data")
filepath = log_files[0]
df = pd.DataFrame(pd.read_json(filepath, lines=True, orient='columns'))
df_orig = df
df
```

## #3: `time` Table

#### Extract Data for Time Table
- Filter records by `NextSong` action
- Convert the `ts` timestamp column to datetime
  - Hint: the current timestamp is in milliseconds
- Extract the timestamp, hour, day, week of year, month, year, and weekday from the `ts` column and set `time_data` to a list containing these values in order
  - Hint: use pandas' [`dt` attribute](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.html) to easily access datetime-like properties.
- Specify labels for these columns and set to `column_labels`
- Create a dataframe, `time_df`, containing the time data for this file by combining `column_labels` and `time_data` into a dictionary and converting this into a dataframe

```
# See: https://cmdlinetips.com/2018/02/how-to-subset-pandas-dataframe-based-on-values-of-a-column/
# df['page']=='NextSong' tells whether a record has the NextSong value in the page column (True/False);
# df[df['page']=='NextSong'] keeps those records and drops rows with any other value.
df = df[df['page']=='NextSong']
#df.head()

t = pd.to_datetime(df['ts'], unit='ms')
#t.head()

# See: https://www.geeksforgeeks.org/different-ways-to-create-pandas-dataframe/
# See: https://stackoverflow.com/questions/44741587/pandas-timestamp-series-to-string
time_data = list(zip(t.dt.strftime('%Y-%m-%d %I:%M:%S'), t.dt.hour, t.dt.day, t.dt.week, t.dt.month, t.dt.year, t.dt.weekday))
column_labels = ('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday')
#time_data

time_df = pd.DataFrame(time_data, columns=column_labels)
for i, row in time_df.iterrows():
    print(list(row))
```

#### Insert Records into Time Table

Implement the `time_table_insert` query in `sql_queries.py` and run the cell below to insert records for the timestamps in this log file into the `time` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `time` table in the sparkify database.

```
for i, row in time_df.iterrows():
    cur.execute(time_table_insert, list(row))
    conn.commit()
```

Run `test.ipynb` to see if you've successfully added records to this table.

## #4: `users` Table

#### Extract Data for Users Table
- Select columns for user ID, first name, last name, gender and level and set to `user_df`

```
user_data = df_orig.get(['userId', 'firstName', 'lastName', 'gender', 'level'])
# adjust column names
user_data.columns = ['user_id', 'first_name', 'last_name', 'gender', 'level']
# remove rows with no user_id
user_data_clean = user_data[user_data['user_id'] != '']
# remove duplicates
user_data_duplicates_removed = user_data_clean.drop_duplicates('user_id', keep='first')
user_df = user_data_duplicates_removed
#user_df
```

#### Insert Records into Users Table

Implement the `user_table_insert` query in `sql_queries.py` and run the cell below to insert records for the users in this log file into the `users` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `users` table in the sparkify database.

```
# NOTE: To prevent duplicates use "ON CONFLICT (user_id) DO NOTHING" in the SQL query.
for i, row in user_df.iterrows():
    cur.execute(user_table_insert, row)
    conn.commit()
```

Run `test.ipynb` to see if you've successfully added records to this table.

## #5: `songplays` Table

#### Extract Data and Songplays Table

This one is a little more complicated since information from the songs table, artists table, and original log file are all needed for the `songplays` table. Since the log file does not specify an ID for either the song or the artist, you'll need to get the song ID and artist ID by querying the songs and artists tables to find matches based on song title, artist name, and song duration time.
- Implement the `song_select` query in `sql_queries.py` to find the song ID and artist ID based on the title, artist name, and duration of a song.
- Select the timestamp, user ID, level, song ID, artist ID, session ID, location, and user agent and set to `songplay_data`

#### Insert Records into Songplays Table
- Implement the `songplay_table_insert` query and run the cell below to insert records for the songplay actions in this log file into the `songplays` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `songplays` table in the sparkify database.

```
# NOTE: In PostgreSQL, use %s as the placeholder for incoming variables,
# e.g. INSERT ... VALUES (%s, %s) or SELECT ... WHERE s.name = %s AND a.artist_id = %s AND s.length = %s
for index, row in df.iterrows():
    # get songid and artistid from song and artist tables
    cur.execute(song_select, (row.song, row.artist, row.length))
    results = cur.fetchone()
    if results:
        songid, artistid = results
    else:
        songid, artistid = None, None
    #print(songid, artistid)

    start_time = pd.to_datetime(row.ts, unit='ms').strftime('%Y-%m-%d %I:%M:%S')
    songplay_data = (start_time, row.userId, row.level, str(songid), str(artistid), row.sessionId, row.location, row.userAgent)
    cur.execute(songplay_table_insert, songplay_data)
    conn.commit()
```

Run `test.ipynb` to see if you've successfully added records to this table.

# Close Connection to Sparkify Database

```
conn.close()
```

# Implement `etl.py`

Use what you've completed in this notebook to implement `etl.py`.
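The `songplays` section above relies on `song_select` and the `ON CONFLICT ... DO NOTHING` notes without showing `sql_queries.py` itself. As a rough, hypothetical sketch only (the real table and column names live in `sql_queries.py` and may differ), the two queries could look something like this:

```
# Hypothetical sketch of two entries in sql_queries.py; actual column names may differ.

# Insert that silently skips rows whose song_id already exists.
song_table_insert = """
    INSERT INTO songs (song_id, title, artist_id, year, duration)
    VALUES (%s, %s, %s, %s, %s)
    ON CONFLICT (song_id) DO NOTHING;
"""

# Look up song_id and artist_id by song title, artist name, and duration,
# matching the (row.song, row.artist, row.length) parameters used above.
song_select = """
    SELECT s.song_id, a.artist_id
    FROM songs s
    JOIN artists a ON s.artist_id = a.artist_id
    WHERE s.title = %s AND a.name = %s AND s.duration = %s;
"""
```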
# Setups, Installations and Imports ``` %%capture !pip install tensorflow !pip install tensorflow-addons !pip install bayesian-optimization import tensorflow as tf from tensorflow import keras from tensorflow.keras.datasets import cifar10 from tensorflow.keras.applications import resnet50 import tensorflow_addons as tfa import sys from tensorflow.python.client import device_lib device_lib.list_local_devices() try: from google.colab import drive drive.mount('/content/drive') IN_COLAB = True except: IN_COLAB = False import os import shutil os.environ["TF_DETERMINISTIC_OPS"] = "1" import time import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import io import itertools from functools import partial from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, precision_score, roc_auc_score, recall_score from tqdm.notebook import tqdm_notebook from ipywidgets import IntProgress from sklearn.model_selection import KFold, StratifiedKFold from bayes_opt import BayesianOptimization from sklearn.preprocessing import LabelBinarizer ``` # Download and Prepare Dataset ``` # Insert the directoryimport sys if IN_COLAB: '''this is the exact path of the mounted direttory. in order to set it, right click on colab notebooks folder and copy path, and insert below ''' COLAB_NOTEBOOKES_PATH = "/content/drive/MyDrive/Colab Notebooks" sys.path.insert(0,COLAB_NOTEBOOKES_PATH) import Ensembles_prepare_datasets ``` #### Dataloader ``` AUTO = tf.data.experimental.AUTOTUNE BATCH_SIZE = 128 def preprocess_image(image, label): img = tf.cast(image, tf.float32) img = img/255. return img, label def get_loaders(x_train, y_train, x_test, y_test): trainloader = tf.data.Dataset.from_tensor_slices((x_train, y_train)) testloader = tf.data.Dataset.from_tensor_slices((x_test, y_test)) trainloader = ( trainloader .shuffle(1024) .map(preprocess_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) testloader = ( testloader .map(preprocess_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) return trainloader, testloader def clean_folder(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print('Failed to delete %s. 
Reason: %s' % (file_path, e)) ROOT_PATH = './' ## SmallCNN models with different initialization models_dir = 'IndependentSolutions' MODEL_PATH = ROOT_PATH+'IndependentSolutions/' if models_dir not in os.listdir(ROOT_PATH): os.mkdir(models_dir) ``` # Model ``` def Model1(num_labels, img_shape, color = 3): inputs = keras.layers.Input(shape=(img_shape, img_shape, color)) x = keras.layers.Conv2D(16, (3,3), padding='same')(inputs) x = keras.activations.relu(x) x = keras.layers.MaxPooling2D(2, strides=2)(x) x = keras.layers.Conv2D(32,(3,3), padding='same')(x) x = keras.activations.relu(x) x = keras.layers.MaxPooling2D(2, strides=2)(x) x = keras.layers.Conv2D(32,(3,3), padding='same')(x) x = keras.activations.relu(x) x = keras.layers.MaxPooling2D(2, strides=2)(x) x = keras.layers.GlobalAveragePooling2D()(x) x = keras.layers.Dense(32, activation='relu')(x) x = keras.layers.Dropout(0.1)(x) outputs = keras.layers.Dense(num_labels, activation='softmax')(x) return keras.models.Model(inputs=inputs, outputs=outputs) def Model(): if color: return Model1(num_labels, img_shape,color) else: return Model1(num_labels, img_shape) ``` # Init hyper parameters ``` learning_rate_range = [0.001,0.1] epsilon_range = [1e-08,1e-05] ``` # Callbacks #### LR Scheduler ``` def lr_schedule(epoch,lr): if (epoch >= 0) & (epoch < 9): return lr elif (epoch >= 9) & (epoch < 19): return lr/2 elif (epoch >= 19) & (epoch < 29): return lr/4 else: return lr/8 #lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda epoch: lr_schedule(epoch), verbose=True) lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_schedule) ``` # Independent Solutions ``` # Saving 5 independent solutions i.e, 5 different inits. # Will save last model snapshot. def run_independent_solutions(): EPOCHS = 10 SAVE_PATH = 'IndependentSolutions/' for iter in tqdm_notebook(range(5)): # initialize weights keras.backend.clear_session() model = Model() #optimizer define with name opt = tf.keras.optimizers.Adam(learning_rate=0.001, epsilon=1e-07, name='Adam'+str(iter)) # compile model.compile(opt, 'sparse_categorical_crossentropy', metrics=['accuracy']) # train _ = model.fit(trainloader, epochs=EPOCHS, validation_data=testloader, callbacks=[lr_callback], verbose=0) # save final model snapshot model.save(SAVE_PATH+'smallcnn_independent_model_{}.h5'.format(iter)) # evaluate loss, accuracy = model.evaluate(testloader, verbose=0) print("Test Error Rate: ", round((1-accuracy)*100, 2), '% | Iteration No: ', iter) def run_independent_solutions(lr,ep,trainloader,testloader): EPOCHS = 10 SAVE_PATH = 'IndependentSolutions/' for iter in tqdm_notebook(range(5)): # initialize weights keras.backend.clear_session() model = Model() #optimizer define with name opt = tf.keras.optimizers.Adam(learning_rate=lr, epsilon=ep, name='Adam'+str(iter)) # compile model.compile(opt, 'sparse_categorical_crossentropy', metrics=['accuracy']) # train _ = model.fit(trainloader, epochs=EPOCHS, validation_data=testloader, callbacks=[lr_callback], verbose=0) # save final model snapshot model.save(SAVE_PATH+'smallcnn_independent_model_{}.h5'.format(iter)) # evaluate loss, accuracy = model.evaluate(testloader, verbose=0) print("Test Error Rate: ", round((1-accuracy)*100, 2), '% | Iteration No: ', iter) #run_independent_solutions() def run_independent_solutions_additive(lr,ep,trainloader,testloader,f_x_test,f_y_test): EPOCHS = 10 SAVE_PATH = 'IndependentSolutions/' clean_folder(SAVE_PATH) accuracy_list = [0] improvments = [] for iter in tqdm_notebook(range(10)): # initialize weights 
keras.backend.clear_session() model = Model() #optimizer define with name opt = tf.keras.optimizers.Adam(learning_rate=lr, epsilon=ep, name='Adam'+str(iter)) # compile model.compile(opt, 'sparse_categorical_crossentropy', metrics=['accuracy']) # train _ = model.fit(trainloader, epochs=EPOCHS, validation_data=testloader, callbacks=[lr_callback], verbose=0) # save final model snapshot model.save(SAVE_PATH+'smallcnn_independent_model_{}.h5'.format(iter)) # evaluate loss, accuracy = model.evaluate(testloader, verbose=0) print("Test Error Rate: ", round((1-accuracy)*100, 2), '% | Iteration No: ', iter) model_ckpts = os.listdir(MODEL_PATH) members = [tf.keras.models.load_model(MODEL_PATH + model_ckpts[i]) for i in range(len(model_ckpts))] yhat = ensemble_predictions(members,f_x_test) accuracy = 100*accuracy_score(f_y_test, yhat) accuracy_list.append(accuracy) #improvments = [j-i for i, j in zip(accuracy_list[:-1], accuracy_list[1:])] improvments.append(accuracy_list[-1]-accuracy_list[-2]) print(accuracy_list) print(improvments) if(len(improvments) > 3): if(improvments[-1] < np.mean(improvments[-4:-1])*1.00): print("found best amount of models: " + str(len(improvments))) return print('did not find amount of models better then maximum') #run_independent_solutions() arr = [1,2,3,4,5,6,7,8,9] for i, j in zip(arr[:-1], arr[1:]): print(i,j) print(arr[-4:-1]) print(arr[-1]-arr[-2]) ``` # Ensembles # Deep Ensembles ``` # make an ensemble prediction for multi-class classification def ensemble_predictions(members): # make predictions yhats = [model.predict(x_test/255.) for model in members] yhats = np.array(yhats) # sum across ensemble members summed = np.sum(yhats, axis=0) # argmax across classes result = np.argmax(summed, axis=1) return result # evaluate a specific number of members in an ensemble def evaluate_n_members(members): accuracy_list = [] for n_members in tqdm_notebook(range(1, len(members))): # select a subset of members subset = members[:n_members] print(len(subset)) # make prediction yhat = ensemble_predictions(subset) # calculate accuracy accuracy_list.append(100*accuracy_score(y_test, yhat)) return accuracy_list def ensemble_predictions(members, test): # make predictions yhats = [model.predict(test/255.) 
for model in members] yhats = np.array(yhats) # sum across ensemble members summed = np.sum(yhats, axis=0) # argmax across classes result = np.argmax(summed, axis=1) return result # Run the ensembles for plot def run_ensembles_and_plot(): members = [tf.keras.models.load_model(MODEL_PATH + model_ckpts[i]) for i in range(len(model_ckpts))] single_model = np.random.choice(members) accuracy_single_model = single_model.evaluate(testloader)[1] * 100 accuracy_list = evaluate_n_members(members) accuracy_list.insert(0, accuracy_single_model) rng = [i for i in range(0, len(members))] plt.figure(figsize=(9,8)) plt.plot(rng, accuracy_list, label='deep ensemble') plt.plot(rng, [accuracy_single_model]*len(rng), '--', label='single model') plt.title("Test accuracy as a function of ensemble size") plt.xlabel("Ensemble size") plt.ylabel("Test accuracy") plt.legend(); plt.savefig('ensemble_func.png') #run_ensembles_and_plot() # Bayesian Optimization with k fold validation def iner_kfold(x_train, y_train, lr, ep): kf = KFold(n_splits = 3, random_state=3, shuffle=True) accuracy_list = [] for train_index, test_index in kf.split(x_train): f_x_train, f_x_test = x_train[train_index], x_train[test_index] f_y_train, f_y_test = y_train[train_index], y_train[test_index] trainloader, testloader = get_loaders(f_x_train, f_y_train, f_x_test, f_y_test) run_independent_solutions(lr, ep,trainloader,testloader) model_ckpts = os.listdir(MODEL_PATH) members = [tf.keras.models.load_model(MODEL_PATH + model_ckpts[i]) for i in range(len(model_ckpts))] yhat = ensemble_predictions(members,f_x_test) accuracy = 100*accuracy_score(f_y_test, yhat) accuracy_list.append(accuracy) print('accuracy:', np.mean(accuracy_list), '+/-', np.std(accuracy_list)) return np.mean(accuracy_list) def kfold_opt(x_train, y_train): print("finding best hyper parameters") fit_with_partial = partial(iner_kfold, x_train, y_train) pbounds = { 'lr': (0.0001,0.01), 'ep': (1e-8,1e-5)} optimizer = BayesianOptimization( f=fit_with_partial, pbounds=pbounds, verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent random_state=1, ) optimizer.maximize(init_points=10, n_iter=10) for i, res in enumerate(optimizer.res): print("Iteration {}: \n\t{}".format(i, res)) print(optimizer.max) return optimizer.max['params']['lr'], optimizer.max['params']['ep'] #claculating the result TPR and FPR def tpr_fpr(y_true, y_prediction): cnf_matrix = confusion_matrix(y_true, y_prediction) FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix) FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix) TP = np.diag(cnf_matrix) TN = cnf_matrix.sum() - (FP + FN + TP) FP = FP.astype(float).sum() FN = FN.astype(float).sum() TP = TP.astype(float).sum() TN = TN.astype(float).sum() # Sensitivity, hit rate, recall, or true positive rate TPR = TP/(TP+FN) # Specificity or true negative rate TNR = TN/(TN+FP) # Precision or positive predictive value PPV = TP/(TP+FP) # Negative predictive value NPV = TN/(TN+FN) # Fall out or false positive rate FPR = FP/(FP+TN) # False negative rate FNR = FN/(TP+FN) # False discovery rate FDR = FP/(TP+FP) # Overall accuracy ACC = (TP+TN)/(TP+FP+FN+TN) return TPR, FPR #writing the results to a file def write_res(name, algo, cros_val, hp_lr, hp_ep, accu, tpr, fpr, preci, auc, train_time): file_name = name + '-' + algo + ".csv" print(file_name) f = open(file_name, "a") f.write(name+','+algo+','+str(cros_val)+','+str(hp_lr)+','+str(hp_ep)+',' +str(accu)+','+str(tpr)+','+str(fpr)+','+str(preci)+','+str(auc)+','+str(train_time)+'\n') f.close() #get AUC of 
multiclass results def multiclass_roc_auc_score(y_test, y_pred, average="macro"): lb = LabelBinarizer() lb.fit(y_test) y_test = lb.transform(y_test) y_pred = lb.transform(y_pred) return roc_auc_score(y_test, y_pred, average=average) # validate preformence result with kfold def kfold_val(): f = open(n + "-improved_algo.csv", "w") f.write('name, algo, cros_val, hp_lr, hp_ep, accu, tpr, fpr, preci, auc, train_time\n') f.close() kf = KFold(n_splits=10, random_state=3, shuffle=True) #should be 10 start1 = time.time() lr=0.0005 ep=1e-07 accuracy_list = [] training_times = [] first_fold = False#True cros_iter = 1 for train_index, test_index in kf.split(x): print("TRAIN:", train_index, "TEST:", test_index) f_x_train, f_x_test = x[train_index], x[test_index] f_y_train, f_y_test = y[train_index], y[test_index] start2 = time.time() if first_fold: #find best hyperparameters to use lr, ep = kfold_opt(f_x_train, f_y_train) first_fold = False trainloader, testloader = get_loaders(f_x_train, f_y_train, f_x_test, f_y_test) run_independent_solutions_additive(lr,ep,trainloader,testloader,f_x_test,f_y_test) end2 = time.time() model_ckpts = os.listdir(MODEL_PATH) members = [tf.keras.models.load_model(MODEL_PATH + model_ckpts[i]) for i in range(len(model_ckpts))] yhat = ensemble_predictions(members,f_x_test) ##### infer results and saving accuracy = 100*accuracy_score(f_y_test, yhat) TPR, FPR = tpr_fpr(f_y_test, yhat) precision = precision_score(f_y_test, yhat, average='weighted') auc = multiclass_roc_auc_score(f_y_test,yhat) accuracy_list.append(accuracy) train_time = end2 - start2 training_times.append(train_time) print("accuracy: {}".format(accuracy)) write_res(n, 'improved_algo', cros_iter, lr, ep, accuracy, TPR, FPR, precision, auc, train_time) cros_iter = cros_iter + 1 end1 = time.time() print('accuracy:', np.mean(accuracy_list), '+/-', np.std(accuracy_list)) print("avarage accuracy: {}".format(np.average(accuracy_list)) ) print("Network takes {:.3f} minuts to fold validate".format((end1 - start1)/60)) #kfold_val() #model_list = ['cifar10', 'cifar100', 'mnist', 'flowers102', 'monkeys10', 'intel','cifar10_2', 'cifar100_2', 'mnist_2', 'flowers102_2', 'monkeys10_2', 'intel_2'] #model_list = ['cifar100', 'cifar100_2', 'flowers102', 'flowers102_2'] model_list = ['cifar10'] color = 0 for n in model_list: if n == 'cifar10': x,x2,y,y2,num_labels,img_shape = Ensembles_prepare_datasets.load_cifar10() elif n == 'cifar10_2': x1,x,y1,y,num_labels,img_shape = Ensembles_prepare_datasets.load_cifar10() elif n == 'cifar100': x,x2,y,y2,num_labels,img_shape = Ensembles_prepare_datasets.load_cifar100() elif n == 'cifar100_2': x1,x,y1,y,num_labels,img_shape = Ensembles_prepare_datasets.load_cifar100() elif n == 'mnist': x,x2,y,y2,num_labels,img_shape,color = Ensembles_prepare_datasets.load_mnist() elif n == 'mnist_2': x1,x,y1,y,num_labels,img_shape,color = Ensembles_prepare_datasets.load_mnist() elif n == 'flowers102': x,x2,y,y2,num_labels,img_shape = Ensembles_prepare_datasets.load_flowers102() elif n == 'flowers102_2': x1,x,y1,y,num_labels,img_shape = Ensembles_prepare_datasets.load_flowers102() elif n == 'monkeys10': x,x2,y,y2,num_labels,img_shape = Ensembles_prepare_datasets.load_monkeys10() elif n == 'monkeys10_2': x1,x,y1,y,num_labels,img_shape = Ensembles_prepare_datasets.load_monkeys10() elif n == 'intel': x,x2,y,y2,num_labels,img_shape = Ensembles_prepare_datasets.load_intel() elif n == 'intel_2': x1,x,y1,y,num_labels,img_shape = Ensembles_prepare_datasets.load_intel() kfold_val() ```
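The training loops above save each ensemble member into `MODEL_PATH` and combine them with `ensemble_predictions` (soft voting over summed softmax outputs). As a small usage sketch, the saved members can be reloaded and scored on whatever split was held out; here `x_holdout`/`y_holdout` are placeholders (for example, the second half of the split returned by the dataset loaders):

```
import os
import tensorflow as tf
from sklearn.metrics import accuracy_score

def evaluate_saved_ensemble(model_path, x_holdout, y_holdout):
    # reload every snapshot written by run_independent_solutions_additive
    members = [tf.keras.models.load_model(os.path.join(model_path, ckpt))
               for ckpt in sorted(os.listdir(model_path))]
    # soft voting: ensemble_predictions sums the per-model softmax outputs and takes the argmax
    yhat = ensemble_predictions(members, x_holdout)
    return 100 * accuracy_score(y_holdout, yhat)

# e.g. acc = evaluate_saved_ensemble(MODEL_PATH, x2, y2)
```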
# Implementation of Recurrent Neural Networks from Scratch :label:`sec_rnn_scratch` In this section we will implement an RNN from scratch for a character-level language model, according to our descriptions in :numref:`sec_rnn`. Such a model will be trained on H. G. Wells' *The Time Machine*. As before, we start by reading the dataset first, which is introduced in :numref:`sec_language_model`. ``` %matplotlib inline import math import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) train_random_iter, vocab_random_iter = d2l.load_data_time_machine( batch_size, num_steps, use_random_iter=True) ``` ## [**One-Hot Encoding**] Recall that each token is represented as a numerical index in `train_iter`. Feeding these indices directly to a neural network might make it hard to learn. We often represent each token as a more expressive feature vector. The easiest representation is called *one-hot encoding*, which is introduced in :numref:`subsec_classification-problem`. In a nutshell, we map each index to a different unit vector: assume that the number of different tokens in the vocabulary is $N$ (`len(vocab)`) and the token indices range from $0$ to $N-1$. If the index of a token is the integer $i$, then we create a vector of all 0s with a length of $N$ and set the element at position $i$ to 1. This vector is the one-hot vector of the original token. The one-hot vectors with indices 0 and 2 are shown below. ``` tf.one_hot(tf.constant([0, 2]), len(vocab)) ``` (**The shape of the minibatch**) that we sample each time (**is (batch size, number of time steps). The `one_hot` function transforms such a minibatch into a three-dimensional tensor with the last dimension equals to the vocabulary size (`len(vocab)`).**) We often transpose the input so that we will obtain an output of shape (number of time steps, batch size, vocabulary size). This will allow us to more conveniently loop through the outermost dimension for updating hidden states of a minibatch, time step by time step. ``` X = tf.reshape(tf.range(10), (2, 5)) tf.one_hot(tf.transpose(X), 28).shape ``` ## Initializing the Model Parameters Next, we [**initialize the model parameters for the RNN model**]. The number of hidden units `num_hiddens` is a tunable hyperparameter. When training language models, the inputs and outputs are from the same vocabulary. Hence, they have the same dimension, which is equal to the vocabulary size. ``` def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) # Hidden layer parameters W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32) W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32) b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32) # Output layer parameters W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xh, W_hh, b_h, W_hq, b_q] return params ``` ## RNN Model To define an RNN model, we first need [**an `init_rnn_state` function to return the hidden state at initialization.**] It returns a tensor filled with 0 and with a shape of (batch size, number of hidden units). Using tuples makes it easier to handle situations where the hidden state contains multiple variables, which we will encounter in later sections. 
``` def init_rnn_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) ``` [**The following `rnn` function defines how to compute the hidden state and output at a time step.**] Note that the RNN model loops through the outermost dimension of `inputs` so that it updates hidden states `H` of a minibatch, time step by time step. Besides, the activation function here uses the $\tanh$ function. As described in :numref:`sec_mlp`, the mean value of the $\tanh$ function is 0, when the elements are uniformly distributed over the real numbers. ``` def rnn(inputs, state, params): # Here `inputs` shape: (`num_steps`, `batch_size`, `vocab_size`) W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] # Shape of `X`: (`batch_size`, `vocab_size`) for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) ``` With all the needed functions being defined, next we [**create a class to wrap these functions and store parameters**] for an RNN model implemented from scratch. ``` class RNNModelScratch: #@save """A RNN Model implemented from scratch.""" def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.init_state, self.forward_fn = init_state, forward_fn self.trainable_variables = get_params(vocab_size, num_hiddens) def __call__(self, X, state): X = tf.one_hot(tf.transpose(X), self.vocab_size) X = tf.cast(X, tf.float32) return self.forward_fn(X, state, self.trainable_variables) def begin_state(self, batch_size, *args, **kwargs): return self.init_state(batch_size, self.num_hiddens) ``` Let us [**check whether the outputs have the correct shapes**], e.g., to ensure that the dimensionality of the hidden state remains unchanged. ``` # defining tensorflow training strategy device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_hiddens = 512 with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape ``` We can see that the output shape is (number of time steps $\times$ batch size, vocabulary size), while the hidden state shape remains the same, i.e., (batch size, number of hidden units). ## Prediction Let us [**first define the prediction function to generate new characters following the user-provided `prefix`**], which is a string containing several characters. When looping through these beginning characters in `prefix`, we keep passing the hidden state to the next time step without generating any output. This is called the *warm-up* period, during which the model updates itself (e.g., update the hidden state) but does not make predictions. After the warm-up period, the hidden state is generally better than its initialized value at the beginning. So we generate the predicted characters and emit them. 
``` def predict_ch8(prefix, num_preds, net, vocab): #@save """Generate new characters following the `prefix`.""" state = net.begin_state(batch_size=1, dtype=tf.float32) outputs = [vocab[prefix[0]]] get_input = lambda: tf.reshape(tf.constant([outputs[-1]]), (1, 1)).numpy() for y in prefix[1:]: # Warm-up period _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): # Predict `num_preds` steps y, state = net(get_input(), state) outputs.append(int(y.numpy().argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) ``` Now we can test the `predict_ch8` function. We specify the prefix as `time traveller ` and have it generate 10 additional characters. Given that we have not trained the network, it will generate nonsensical predictions. ``` predict_ch8('time traveller ', 10, net, vocab) ``` ## [**Gradient Clipping**] For a sequence of length $T$, we compute the gradients over these $T$ time steps in an iteration, which results in a chain of matrix-products with length $\mathcal{O}(T)$ during backpropagation. As mentioned in :numref:`sec_numerical_stability`, it might result in numerical instability, e.g., the gradients may either explode or vanish, when $T$ is large. Therefore, RNN models often need extra help to stabilize the training. Generally speaking, when solving an optimization problem, we take update steps for the model parameter, say in the vector form $\mathbf{x}$, in the direction of the negative gradient $\mathbf{g}$ on a minibatch. For example, with $\eta > 0$ as the learning rate, in one iteration we update $\mathbf{x}$ as $\mathbf{x} - \eta \mathbf{g}$. Let us further assume that the objective function $f$ is well behaved, say, *Lipschitz continuous* with constant $L$. That is to say, for any $\mathbf{x}$ and $\mathbf{y}$ we have $$|f(\mathbf{x}) - f(\mathbf{y})| \leq L \|\mathbf{x} - \mathbf{y}\|.$$ In this case we can safely assume that if we update the parameter vector by $\eta \mathbf{g}$, then $$|f(\mathbf{x}) - f(\mathbf{x} - \eta\mathbf{g})| \leq L \eta\|\mathbf{g}\|,$$ which means that we will not observe a change by more than $L \eta \|\mathbf{g}\|$. This is both a curse and a blessing. On the curse side, it limits the speed of making progress; whereas on the blessing side, it limits the extent to which things can go wrong if we move in the wrong direction. Sometimes the gradients can be quite large and the optimization algorithm may fail to converge. We could address this by reducing the learning rate $\eta$. But what if we only *rarely* get large gradients? In this case such an approach may appear entirely unwarranted. One popular alternative is to clip the gradient $\mathbf{g}$ by projecting them back to a ball of a given radius, say $\theta$ via (**$$\mathbf{g} \leftarrow \min\left(1, \frac{\theta}{\|\mathbf{g}\|}\right) \mathbf{g}.$$**) By doing so we know that the gradient norm never exceeds $\theta$ and that the updated gradient is entirely aligned with the original direction of $\mathbf{g}$. It also has the desirable side-effect of limiting the influence any given minibatch (and within it any given sample) can exert on the parameter vector. This bestows a certain degree of robustness to the model. Gradient clipping provides a quick fix to the gradient exploding. While it does not entirely solve the problem, it is one of the many techniques to alleviate it. Below we define a function to clip the gradients of a model that is implemented from scratch or a model constructed by the high-level APIs. 
Also note that we compute the gradient norm over all the model parameters. ``` def grad_clipping(grads, theta): #@save """Clip the gradient.""" theta = tf.constant(theta, dtype=tf.float32) new_grad = [] for grad in grads: if isinstance(grad, tf.IndexedSlices): new_grad.append(tf.convert_to_tensor(grad)) else: new_grad.append(grad) norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy() for grad in new_grad)) norm = tf.cast(norm, tf.float32) if tf.greater(norm, theta): for i, grad in enumerate(new_grad): new_grad[i] = grad * theta / norm else: new_grad = new_grad return new_grad ``` ## Training Before training the model, let us [**define a function to train the model in one epoch**]. It differs from how we train the model of :numref:`sec_softmax_scratch` in three places: 1. Different sampling methods for sequential data (random sampling and sequential partitioning) will result in differences in the initialization of hidden states. 1. We clip the gradients before updating the model parameters. This ensures that the model does not diverge even when gradients blow up at some point during the training process. 1. We use perplexity to evaluate the model. As discussed in :numref:`subsec_perplexity`, this ensures that sequences of different length are comparable. Specifically, when sequential partitioning is used, we initialize the hidden state only at the beginning of each epoch. Since the $i^\mathrm{th}$ subsequence example in the next minibatch is adjacent to the current $i^\mathrm{th}$ subsequence example, the hidden state at the end of the current minibatch will be used to initialize the hidden state at the beginning of the next minibatch. In this way, historical information of the sequence stored in the hidden state might flow over adjacent subsequences within an epoch. However, the computation of the hidden state at any point depends on all the previous minibatches in the same epoch, which complicates the gradient computation. To reduce computational cost, we detach the gradient before processing any minibatch so that the gradient computation of the hidden state is always limited to the time steps in one minibatch. When using the random sampling, we need to re-initialize the hidden state for each iteration since each example is sampled with a random position. Same as the `train_epoch_ch3` function in :numref:`sec_softmax_scratch`, `updater` is a general function to update the model parameters. It can be either the `d2l.sgd` function implemented from scratch or the built-in optimization function in a deep learning framework. ``` #@save def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter): """Train a model within one epoch (defined in Chapter 8).""" state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) # Sum of training loss, no. 
of tokens for X, Y in train_iter: if state is None or use_random_iter: # Initialize `state` when either it is the first iteration or # using random sampling state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32) with tf.GradientTape(persistent=True) as g: y_hat, state = net(X, state) y = tf.reshape(tf.transpose(Y), (-1)) l = loss(y, y_hat) params = net.trainable_variables grads = g.gradient(l, params) grads = grad_clipping(grads, 1) updater.apply_gradients(zip(grads, params)) # Keras loss by default returns the average loss in a batch # l_sum = l * float(d2l.size(y)) if isinstance( # loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() ``` [**The training function supports an RNN model implemented either from scratch or using high-level APIs.**] ``` #@save def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False): """Train a model (defined in Chapter 8).""" with strategy.scope(): loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) updater = tf.keras.optimizers.SGD(lr) animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab) # Train and predict for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter) if (epoch + 1) % 10 == 0: print(predict('time traveller')) animator.add(epoch + 1, [ppl]) device = d2l.try_gpu()._device_name print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}') print(predict('time traveller')) print(predict('traveller')) ``` [**Now we can train the RNN model.**] Since we only use 10000 tokens in the dataset, the model needs more epochs to converge better. ``` num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, strategy) ``` [**Finally, let us check the results of using the random sampling method.**] ``` with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) train_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True) ``` While implementing the above RNN model from scratch is instructive, it is not convenient. In the next section we will see how to improve the RNN model, such as how to make it easier to implement and make it run faster. ## Summary * We can train an RNN-based character-level language model to generate text following the user-provided text prefix. * A simple RNN language model consists of input encoding, RNN modeling, and output generation. * RNN models need state initialization for training, though random sampling and sequential partitioning use different ways. * When using sequential partitioning, we need to detach the gradient to reduce computational cost. * A warm-up period allows a model to update itself (e.g., obtain a better hidden state than its initialized value) before making any prediction. * Gradient clipping prevents gradient explosion, but it cannot fix vanishing gradients. ## Exercises 1. Show that one-hot encoding is equivalent to picking a different embedding for each object. 1. Adjust the hyperparameters (e.g., number of epochs, number of hidden units, number of time steps in a minibatch, and learning rate) to improve the perplexity. * How low can you go? * Replace one-hot encoding with learnable embeddings. Does this lead to better performance? * How well will it work on other books by H. G. 
Wells, e.g., [*The War of the Worlds*](http://www.gutenberg.org/ebooks/36)? 1. Modify the prediction function such as to use sampling rather than picking the most likely next character. * What happens? * Bias the model towards more likely outputs, e.g., by sampling from $q(x_t \mid x_{t-1}, \ldots, x_1) \propto P(x_t \mid x_{t-1}, \ldots, x_1)^\alpha$ for $\alpha > 1$. 1. Run the code in this section without clipping the gradient. What happens? 1. Change sequential partitioning so that it does not separate hidden states from the computational graph. Does the running time change? How about the perplexity? 1. Replace the activation function used in this section with ReLU and repeat the experiments in this section. Do we still need gradient clipping? Why? [Discussions](https://discuss.d2l.ai/t/1052)
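As a quick, informal check of the clipping rule above (not part of the original chapter), the sketch below compares `grad_clipping` against the closed-form projection $\mathbf{g} \leftarrow \min\left(1, \frac{\theta}{\|\mathbf{g}\|}\right) \mathbf{g}$ on a pair of toy gradient tensors; it assumes TensorFlow is running eagerly, as in the rest of this section.

```
import numpy as np
import tensorflow as tf

# Two toy "gradient" tensors whose joint L2 norm exceeds theta.
toy_grads = [tf.constant([[3.0, 4.0]]), tf.constant([5.0])]
theta = 2.0

# Closed-form rule from the text, with the norm taken over all parameters jointly.
joint_norm = np.sqrt(sum(float(tf.reduce_sum(g ** 2)) for g in toy_grads))
scale = min(1.0, theta / joint_norm)
expected = [g.numpy() * scale for g in toy_grads]

clipped = grad_clipping(toy_grads, theta)  # `grad_clipping` is defined above
for c, e in zip(clipped, expected):
    np.testing.assert_allclose(c.numpy(), e, rtol=1e-5)

# After clipping, the joint norm is (approximately) theta.
print(joint_norm,
      np.sqrt(sum(float(tf.reduce_sum(c ** 2)) for c in clipped)))
```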
# Week 1: Census Data Analysis ## Notes ### About the PDB - Data is from the Census Planning Database (PDB) (full dataset is downloadable as a .csv). - The PDB contains data from both the 2010 decennial census and the 2010-2014 American Community Survey (ACS). Since the purpose of the ACS is to measure changing social and economic characteristics of the population, we primarily refer to ACS variables in this analysis. - PDB data is at the census tract or block group (which is more granular) level. - Variable names are explained here: https://api.census.gov/data/2016/pdb/blockgroup/variables.html, https://api.census.gov/data/2016/pdb/tract/variables.html ### Getting geographic information - Locations are given as State/County/Tract/BG codes. In order to interpret these as longitude/latitude coordinates, we need a mapping from block group/census tract to geography. - Census tract to longitude/latitude coordinates are available in the Census Tracts Gazetteer file (https://www.census.gov/geo/maps-data/data/gazetteer2017.html). This is what we use in this preliminary analysis. - Mappings from block group can be accessed by opening the relevant shapefiles in ArcGIS (http://gif.berkeley.edu/resources/arcgis_education_edition.html). - About Shapefiles: https://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2017/TGRSHP2017_TechDoc_Ch2.pdf ``` import pandas as pd import matplotlib.pyplot as plt %matplotlib inline %pylab inline import numpy as np # Census Planning Database - Block Group # full_pdb16_bg_df = pd.read_csv("raw-data/pdb2016_bg_v8_us.csv", encoding="ISO-8859-1") # Census Planning Database - Census Tract full_pdb16_tr_df = pd.read_csv("raw-data/pdb2016_tr_v8_us.csv", encoding="ISO-8859-1") ``` ### Alameda County ``` def df_for_county(county_name): return full_pdb16_tr_df.loc[full_pdb16_tr_df['County_name'] == county_name] alameda_tr_df = df_for_county("Alameda County") # Importing longitude/latitude mappings gaz_tracts_df = pd.read_csv("raw-data/2017_gaz_tracts_06.csv", encoding="ISO-8859-1") def map_lat_long_geoid(geoid_min, geoid_max, df): # Adds latitude and longitude columns to the dataframe. # Modifies df in place. 
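    # GEOID layout assumed here: 2-digit state FIPS + 3-digit county FIPS + 6-digit tract code,
    # so keeping rows with geoid_min <= GEOID < geoid_max selects every tract in one county
    # (e.g. 6001000000-6001999999 covers Alameda County: state 06, county 001).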
lat_long_df = gaz_tracts_df[gaz_tracts_df['GEOID'] >= geoid_min] lat_long_df = lat_long_df[lat_long_df['GEOID'] < geoid_max] lat_long_df = lat_long_df[['GEOID', 'INTPTLAT', 'INTPTLONG ']] num_tracts = len(lat_long_df) - 1 gidtr_lat, gidtr_long = {}, {} for i in range(num_tracts): geoid, lat, long = lat_long_df.iloc[i][0], lat_long_df.iloc[i][1], lat_long_df.iloc[i][2] gidtr_lat[geoid], gidtr_long[geoid] = lat, long df['Latitude'] = df['GIDTR'].map(gidtr_lat) df['Longitude'] = df['GIDTR'].map(gidtr_long) map_lat_long_geoid(6001000000, 6002000000, alameda_tr_df) alameda_tr_df # Some preliminary datasets gender_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']] ethnicity_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']] health_ins_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']] income_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']] income_alameda_tr_df gender_alameda_tr_df.to_csv('datasets/alameda/gender_alameda_tr.csv') ethnicity_alameda_tr_df.to_csv('datasets/alameda/ethnicity_alameda_tr.csv') health_ins_alameda_tr_df.to_csv('datasets/alameda/health_ins_alameda_tr.csv') income_alameda_tr_df.to_csv('datasets/alameda/income_alameda_tr.csv') ``` ### Other Bay Area Counties ``` sanfrancisco_tr_df = df_for_county("San Francisco County") map_lat_long_geoid(6075000000, 6076000000, sanfrancisco_tr_df) gender_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']] ethnicity_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']] health_ins_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']] income_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']] gender_sanfrancisco_tr_df.to_csv('datasets/san-francisco/gender_sanfrancisco_tr.csv') ethnicity_sanfrancisco_tr_df.to_csv('datasets/san-francisco/ethnicity_sanfrancisco_tr.csv') health_ins_sanfrancisco_tr_df.to_csv('datasets/san-francisco/health_ins_sanfrancisco_tr.csv') income_sanfrancisco_tr_df.to_csv('datasets/san-francisco/income_sanfrancisco_tr.csv') sanmateo_tr_df = df_for_county("San Mateo County") map_lat_long_geoid(6081000000, 6082000000, sanmateo_tr_df) gender_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']] ethnicity_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 
'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']] health_ins_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']] income_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']] gender_sanmateo_tr_df.to_csv('datasets/san-mateo/gender_sanmateo_tr.csv') ethnicity_sanmateo_tr_df.to_csv('datasets/san-mateo/ethnicity_sanmateo_tr.csv') health_ins_sanmateo_tr_df.to_csv('datasets/san-mateo/health_ins_sanmateo_tr.csv') income_sanmateo_tr_df.to_csv('datasets/san-mateo/income_sanmateo_tr.csv') santaclara_tr_df = df_for_county("Santa Clara County") map_lat_long_geoid(6085000000, 6086000000, santaclara_tr_df) gender_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']] ethnicity_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']] health_ins_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']] income_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']] gender_santaclara_tr_df.to_csv('datasets/santa-clara/gender_santaclara_tr.csv') ethnicity_santaclara_tr_df.to_csv('datasets/santa-clara/ethnicity_santaclara_tr.csv') health_ins_santaclara_tr_df.to_csv('datasets/santa-clara/health_ins_santaclara_tr.csv') income_santaclara_tr_df.to_csv('datasets/santa-clara/income_santaclara_tr.csv') def write_datasets_for_county(county_name, dir_path): # Gets the data for the county, maps the latitude/longitude coordinates, # and writes the relevant datasets. 
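    # The GEOID window below is rebuilt from this county's own State/County FIPS codes:
    # county is zero-padded to 3 digits, and county + 1 gives an exclusive upper bound,
    # mirroring the hard-coded ranges used for the individual counties above.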
df = df_for_county(county_name) state, county = df['State'].iloc[0], df['County'].iloc[0] gidtr_min = int(str(state) + str(county).zfill(3) + '000000') gidtr_max = int(str(state) + str(county + 1).zfill(3) + '000000') map_lat_long_geoid(gidtr_min, gidtr_max, df) gender = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']] ethnicity = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']] health_ins = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']] income = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']] gender.to_csv(dir_path + "gender_" + county_name.lower().replace(" ", "") + "_tr.csv") ethnicity.to_csv(dir_path + "ethnicity_" + county_name.lower().replace(" ", "") + "_tr.csv") health_ins.to_csv(dir_path + "health_ins_" + county_name.lower().replace(" ", "") + "_tr.csv") income.to_csv(dir_path + "income_" + county_name.lower().replace(" ", "") + "_tr.csv") write_datasets_for_county("Marin County", "datasets/marin/") write_datasets_for_county("Contra Costa County", "datasets/contra-costa/") write_datasets_for_county("Napa County", "datasets/napa/") write_datasets_for_county("Sonoma County", "datasets/sonoma/") write_datasets_for_county("Solano County", "datasets/solano/") ``` # Pre-Process Census Data for Unknowns ## Health Ins ``` pd.read_csv('census-datasets/alameda/income_alameda_tr.csv') set(pd.read_csv('census-datasets/alameda/poverty_level_alameda_tr_split.csv')['Variable']) health_ins_binarized = pd.read_csv('census-datasets/alameda/health_ins_alameda_tr_split_binarized.csv') health_ins_binarized[(health_ins_binarized['variable'] == 'One_Plus_Health_Ins')\ & (np.abs(health_ins_binarized['Latitude'] - 37.867) < 5e-4)] health_ins = pd.read_csv('census-datasets/alameda/health_ins_alameda_tr.csv') health_ins.shape health_ins[np.abs(health_ins['Latitude'] - 37.867) < 5e-4] health_ins_binarized.iloc[493]['Latitude'] - health_ins_binarized.iloc[855]['Latitude'] health_ins_binarized.shape health_ins_binarized.iloc[3] health_ins_binarized.iloc[[3, 363, 723, 1083]] ``` ### Add Two Health Ins to One Health Ins ``` health_ins_cleaned_df = pd.DataFrame.copy(health_ins_binarized.iloc[:720]) vals_to_add = np.zeros(shape=len(health_ins_cleaned_df,)) vals_to_add.shape # add one and two health ins populations together for each tract for i in np.arange(720, 1080): vals_to_add[i - 360] = health_ins_binarized.iloc[i]['value'] health_ins_cleaned_df['value'] = health_ins_cleaned_df['value'] + vals_to_add ``` ### Split the Unknowns ``` unknown_pop_vals = np.array(health_ins_binarized.iloc[1080:]['value']) vals_to_add = np.zeros(shape=len(health_ins_cleaned_df, )) vals_to_add.shape for i in range(360): no_ins_pop = health_ins_cleaned_df.iloc[i]['value'] one_ins_pop = health_ins_cleaned_df.iloc[i + 360]['value'] frac_no_ins = no_ins_pop / (no_ins_pop + one_ins_pop) frac_with_ins = 1.0 - frac_no_ins vals_to_add[i] = np.round(frac_no_ins * unknown_pop_vals[i], decimals=0) vals_to_add[360 + i] = np.round(frac_with_ins * unknown_pop_vals[i], decimals=0) for i in range(360): 
print(vals_to_add[i] + vals_to_add[360 + i] - unknown_pop_vals[i]) health_ins_cleaned_df['value'] = health_ins_cleaned_df['value'] + vals_to_add health_ins_cleaned_df.shape health_ins.shape health_ins_cleaned_df.head() health_ins_cleaned_df.to_csv('census-datasets/alameda/health_ins_binarized_unknown_removed.csv') ``` ## Race ``` race_split_df = pd.read_csv('census-datasets/alameda/ethnicity_alameda_tr_racial_split.csv') race_split_df.head() set(race_split_df['Variable'].values) vals_to_add = np.zeros(shape=len(race_split_df) - 360,) unknown_pop_vals = np.array(race_split_df['Value'].iloc[2160:].values) unknown_pop_vals.shape for i in range(360): indices = i + np.arange(0, 6) * 360 pop_values = np.array(race_split_df.iloc[indices]['Value'].values) pop_fractions = pop_values / np.sum(pop_values) to_add = np.round(pop_fractions * unknown_pop_vals[i]) for relative_index, true_index in enumerate(indices): vals_to_add[true_index] = to_add[relative_index] vals_to_add.shape race_split_df_clean = pd.DataFrame.copy(race_split_df.iloc[:2160]) race_split_df_clean['Value'] = race_split_df_clean['Value'] + vals_to_add race_split_df_clean['Value'] - race_split_df.iloc[:2160]['Value'] race_split_df_clean.to_csv('census-datasets/alameda/ethnicity_alameda_tr_split_unknown_removed.csv') vals_to_add np.arange(0, 6) * 360 + 359 5 + np.arange(0, 7) * 360 ```
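The health-insurance and race cells above both apply the same idea: distribute each tract's "unknown" count across the observed categories in proportion to their observed shares, rounding to whole people. A small, hypothetical helper that captures this on plain arrays is sketched below; the function and variable names are illustrative and not part of the original notebook.

```
import numpy as np

def allocate_unknowns(category_counts, unknown_counts):
    """Spread unknown counts across categories in proportion to observed counts.

    category_counts: array of shape (n_categories, n_tracts)
    unknown_counts:  array of shape (n_tracts,)
    Returns category_counts with the rounded, proportionally allocated unknowns added.
    """
    category_counts = np.asarray(category_counts, dtype=float)
    unknown_counts = np.asarray(unknown_counts, dtype=float)
    totals = category_counts.sum(axis=0)
    # Share of each tract's observed population falling in each category
    # (tracts with no observed population keep a share of zero).
    shares = np.divide(category_counts, totals,
                       out=np.zeros_like(category_counts), where=totals > 0)
    return category_counts + np.round(shares * unknown_counts)

# Toy example: 2 categories, 3 tracts, 10 unknowns per tract.
observed = np.array([[30., 10., 0.],
                     [70., 90., 0.]])
unknown = np.array([10., 10., 10.])
print(allocate_unknowns(observed, unknown))
# [[33. 11.  0.]
#  [77. 99.  0.]]
```

As in the notebook, rounding each category independently can make the allocated total differ from the tract's unknown count by a person or two.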
# Generating a set of Total Field Anomaly data for a model

Notebook to build and save a dictionary with the Total Field Anomaly data for a set of geometrical objects.

#### Import libraries

```
%matplotlib inline

from IPython.display import Markdown as md
from IPython.display import display as dp
import string as st
import sys
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
import datetime

from fatiando.utils import ang2vec, vec2ang
from fatiando.mesher import Sphere, Prism, PolygonalPrism
from fatiando.gravmag import sphere, prism, polyprism

notebook_name = 'synthetic_data.ipynb'
```

#### Importing auxiliary functions

```
dir_modules = '../../mypackage'
sys.path.append(dir_modules)

import auxiliary_functions as func
```

#### Loading properties of a set of geometrical objects

```
with open('data/model_poly_remanent.pickle') as f:
    model_poly_remanent = pickle.load(f)

# The induced-magnetization model is also used below; this filename is assumed,
# following the same naming convention as the remanent model.
with open('data/model_poly_induced.pickle') as f:
    model_poly_induced = pickle.load(f)
```

#### Loading the grid parameters

```
with open('data/airborne_survey.pickle') as f:
    airborne = pickle.load(f)
```

#### Constructing a dictionary

```
data_set = dict()
```

#### List of saved files

```
saved_files = []
```

## Properties of the model

## Main field

```
inc_gf, dec_gf = model_poly_remanent['main_field']

print 'Main field inclination: %.1f degree' % inc_gf
print 'Main field declination: %.1f degree' % dec_gf
```

## Magnetization Direction

### Direction w/ the presence of remanent magnetization

```
print 'Intensity: %.1f A/m' % model_poly_remanent['m']
print 'Inclination: %.1f degree' % model_poly_remanent['inc']
print 'Declination: %.1f degree' % model_poly_remanent['dec']

inc_R, dec_R = model_poly_remanent['inc'], model_poly_remanent['dec']
```

## Calculating the data

### For Airborne survey

#### Observation area

```
print 'Area limits: \n x_max = %.1f m \n x_min = %.1f m \n y_max = %.1f m \n y_min = %.1f m' % (airborne['area'][1], airborne['area'][0], airborne['area'][3], airborne['area'][2])
```

#### Airborne survey information

```
print 'Shape : (%.0f,%.0f)' % airborne['shape']
print 'Number of data: %.1f' % airborne['N']
print 'dx: %.1f m' % airborne['dx']
print 'dy: %.1f m ' % airborne['dy']
print 'Height: %.1f m' % airborne['z_obs']
```

#### Calculating the data

```
data_set['tfa_poly_RM_airb'] = polyprism.tf(airborne['x'], airborne['y'], airborne['z'],
                                            model_poly_remanent['model'], inc_gf, dec_gf)

data_set['tfa_poly_IM_airb'] = polyprism.tf(airborne['x'], airborne['y'], airborne['z'],
                                            model_poly_induced['model'], inc_gf, dec_gf)
```

##### Generating noise for the data set w/ remanent magnetization presence

```
np.random.seed(seed=40)

std_noise = 10.
r = np.random.normal(0.0, std_noise, airborne['Nx']*airborne['Ny'])

data_set['tfa_obs_poly_RM_airb'] = data_set['tfa_poly_RM_airb'] + r
```

##### Generating noise for the induced data set

```
np.random.seed(seed=40)

std_noise = 10.
r = np.random.normal(0.0, std_noise, airborne['Nx']*airborne['Ny'])

data_set['tfa_obs_poly_IM_airb'] = data_set['tfa_poly_IM_airb'] + r
```

#### Visualization of Total Field Anomaly for airborne survey w/ the presence of Remanent magnetization in a polyprism

```
title_font = 20
bottom_font = 18
saturation_factor = 1.

plt.close('all')
plt.figure(figsize=(9,9), tight_layout=True)

plt.contourf(airborne['y'].reshape(airborne['shape']),
             airborne['x'].reshape(airborne['shape']),
             data_set['tfa_obs_poly_RM_airb'].reshape(airborne['shape']),
             20, cmap='viridis')
plt.colorbar(pad=0.01, aspect=40, shrink=1.0).set_label('nT')
plt.xlabel('y (m)', fontsize=title_font)
plt.ylabel('x (m)', fontsize=title_font)
plt.title('TFA (RM_airborne)', fontsize=title_font)
plt.tick_params(labelsize=15)

file_name = 'figs/airborne/noisy_data_tfa_poly_RM_airborne'
plt.savefig(file_name+'.png', dpi=200)
saved_files.append(file_name+'.png')
plt.savefig(file_name+'.eps', dpi=200)
saved_files.append(file_name+'.eps')

plt.show()
```

#### Visualization of Total Field Anomaly for the airborne survey w/ Induced magnetization in a polyprism

```
title_font = 20
bottom_font = 18
saturation_factor = 1.

plt.close('all')
plt.figure(figsize=(9,9), tight_layout=True)

plt.contourf(airborne['y'].reshape(airborne['shape']),
             airborne['x'].reshape(airborne['shape']),
             data_set['tfa_obs_poly_IM_airb'].reshape(airborne['shape']),
             20, cmap='viridis')
plt.colorbar(pad=0.01, aspect=40, shrink=1.0).set_label('nT')
plt.xlabel('y (m)', fontsize=title_font)
plt.ylabel('x (m)', fontsize=title_font)
plt.title('TFA (IM_airborne)', fontsize=title_font)
plt.tick_params(labelsize=15)

file_name = 'figs/airborne/noisy_data_tfa_poly_IM_airborne'
plt.savefig(file_name+'.png', dpi=200)
saved_files.append(file_name+'.png')
plt.savefig(file_name+'.eps', dpi=200)
saved_files.append(file_name+'.eps')

plt.show()
```

#### Generating .pickle file

```
now = datetime.datetime.utcnow().strftime('%d %B %Y %H:%M:%S UTC')

data_set['metadata'] = 'Generated by {name} on {date}'.format(date=now, name=notebook_name)

file_name = 'data/data_set.pickle'
with open(file_name, 'w') as f:
    pickle.dump(data_set, f)

saved_files.append(file_name)
```

## Saved files

```
with open('reports/report_%s.md' % notebook_name[:st.index(notebook_name, '.')], 'w') as q:
    q.write('# Saved files \n')
    now = datetime.datetime.utcnow().strftime('%d %B %Y %H:%M:%S UTC')
    header = 'Generated by {name} on {date}'.format(date=now, name=notebook_name)
    q.write('\n\n'+header+'\n\n')
    for i, sf in enumerate(saved_files):
        print '%d %s' % (i+1, sf)
        q.write('* `%s` \n' % (sf))
```
# Diagrammatic Differentiation in Practice

Slides from the Oxford quantum group lunch talk on February 18th 2021.

## Implementing automatic differentiation in discopy

```
from discopy import *
from discopy.quantum import *
from discopy.quantum.zx import Functor, Diagram
from sympy.abc import theta, phi, symbols
from matplotlib import pyplot as plt
```

**Derivatives are compositional**: if you have the derivative of each box, then you have the derivative of the diagrams made from those boxes.

```
x = symbols('x')

f_array = [[1, 0], [0, x]]
g_array = [[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,x*x]]
h_array = [[1, 1], [1,-1]]

f = QuantumGate('f(x)', n_qubits=1, data=x, array=f_array)
g = QuantumGate('g(x)', n_qubits=2, data=x, array=g_array)
h = QuantumGate('h', n_qubits=1, data=None, array=h_array)

d = {f: [[0, 0], [0, 1]],
     g: [[0,0,0,0], [0,0,0,0], [0,0,0,0], [0,0,0,2*x]]}

circuit = (f @ h >> g)
circuit.draw(figsize=(3,3))
```

Given a commutative rig $\mathbb{S}$, a derivation $\partial: \mathbb{S} \rightarrow \mathbb{S}$ is defined as any operation that satisfies the linearity and product rules:

$$ \partial(f + g) = \partial f + \partial g \quad \text{and} \quad \partial(f \times g) = \partial f \times g + f \times \partial g $$

A related notion is that of dual numbers $D[\mathbb{S}]$: numbers of the form $a + b \epsilon$ for $a, b \in \mathbb{S}$ such that $\epsilon^2 = 0$. Sum and product are given by:

\begin{aligned} \left(a+a^{\prime} \epsilon\right)+\left(b+b^{\prime} \epsilon\right) &=(a+b)+\left(a^{\prime}+b^{\prime}\right) \epsilon \\ \left(a+a^{\prime} \epsilon\right) \times\left(b+b^{\prime} \epsilon\right) &=(a \times b)+\left(a \times b^{\prime}+a^{\prime} \times b\right) \epsilon \end{aligned}

Write $\pi_0, \pi_1 : D[\mathbb{S}] \to \mathbb{S}$ for the projections onto the real and epsilon components, respectively.

**Lemma:** Every derivation defines a rig homomorphism $\mathbb{S} \to D[\mathbb{S}]$ with $f \mapsto f + \partial f \, \epsilon$. The other way around, every rig homomorphism $\partial : \mathbb{S} \to D[\mathbb{S}]$ with $\pi_0 \circ \partial = \text{id}_\mathbb{S}$ defines a derivation $\pi_1 \circ \partial : \mathbb{S} \to \mathbb{S}$.
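To make the dual-number arithmetic concrete before returning to diagrams, here is a minimal standalone sketch (plain Python, not discopy API) of $D[\mathbb{R}]$ implementing exactly the sum and product rules above; seeding the epsilon part with $1$ recovers the derivative.

```
# Minimal dual-number sketch: a + b*eps with eps**2 == 0 (illustration only).
class Dual:
    def __init__(self, a, b=0.0):
        self.a, self.b = a, b  # real part, epsilon part

    def __add__(self, other):
        # (a + a'eps) + (b + b'eps) = (a + b) + (a' + b')eps
        return Dual(self.a + other.a, self.b + other.b)

    def __mul__(self, other):
        # (a + a'eps) x (b + b'eps) = ab + (ab' + a'b)eps
        return Dual(self.a * other.a, self.a * other.b + self.b * other.a)

    def __repr__(self):
        return '{} + {}*eps'.format(self.a, self.b)

# f(x) = x**3 seeded with epsilon part 1: the epsilon projection is f'(2) = 12.
x_dual = Dual(2.0, 1.0)
print(x_dual * x_dual * x_dual)  # 8.0 + 12.0*eps
```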
For example, in the rig of smooth functions we can lift any smooth function $f : \mathbb{R} \rightarrow \mathbb{R}$ to a function $f: D[\mathbb{R}] \rightarrow D[\mathbb{R}]$ over the dual numbers defined by:

$$f\left(a+a^{\prime} \epsilon\right)=f(a)+a^{\prime} \times(\partial f)(a) \epsilon$$

Then we can derive the following linearity, product and chain rules:

\begin{aligned} (f+g)\left(a+a^{\prime} \epsilon\right) &=(f+g)(a)+a^{\prime} \times(\partial f+\partial g)(a) \epsilon \\ (f \times g)\left(a+a^{\prime} \epsilon\right) &=(f \times g)(a)+a^{\prime} \times(f \times \partial g+\partial f \times g)(a) \epsilon \\ (f \circ g)\left(a+a^{\prime} \epsilon\right) &=(f \circ g)(a)+a^{\prime} \times(\partial g \times \partial f \circ g)(a) \epsilon \end{aligned}

```
eps = QuantumGate('eps', n_qubits=0, array=[1e-10])

def DualFunctorAr(box):
    if x in box.free_symbols:
        d_box = QuantumGate(f'd{box.name}', n_qubits=len(box.cod), data=x, array=d[box])
        return box + d_box @ eps
    else:
        return box

dual_functor = CircuitFunctor(ob=lambda x: x, ar=DualFunctorAr)
test = dual_functor(circuit)
test.draw(figsize=(15, 5))

def project_in_eps(diagram):
    eps_terms = []
    for term in diagram.terms:
        if [box.name for box in term.boxes].count('eps') == 1:
            # remove epsilon
            remove_eps_functor = CircuitFunctor(ob=lambda x: x,
                                                ar=lambda x: Id(0) if x.name == 'eps' else x)
            eps_term = remove_eps_functor(term)
            eps_terms.append(eps_term)
    return Sum(eps_terms, cod=diagram.cod, dom=diagram.dom)

drawing.equation(circuit, project_in_eps(test), figsize=(18, 3), symbol="--dual--> --project-->")
```

$$\tiny (f(x) \otimes h) \circ g = \left(\begin{pmatrix}1 & 0 \\ 0 & x\end{pmatrix} \otimes \begin{pmatrix}1 & 1 \\ 1 & -1\end{pmatrix}\right) \circ \begin{pmatrix}1&&&\\&1&&\\&&1&\\&&&x^2\end{pmatrix} = \begin{pmatrix}1&1&&\\1&-1&&\\&&x&x^3\\&&x&-x^3\end{pmatrix} $$

```
project_in_eps(test).eval().array.reshape(4, 4)
```

## Rules for differentiating diagrams

![image.png](attachment:image.png)

```
circuit = (Rx(x) @ Id(1)) >> CX >> (Rx(2*x) @ Id(1))
drawing.equation(circuit, circuit.grad(x, mixed=False), symbol='|--->', figsize=(15, 4))
```

## Rules for differentiating ZX diagrams

![image.png](attachment:image.png)

```
XC = QuantumGate('XC', n_qubits=2, array=[[0,1,0,0], [1,0,0,0], [0,0,1,0], [0,0,0,1]])

def gate2zx_new(box):
    from discopy.quantum.zx import gate2zx, PRO, Z, X, Id
    if box == XC:
        return Id(1) @ Z(1, 2) >> X(2, 1) @ Id(1)
    else:
        return gate2zx(box)

circuit2zx = Functor(ob={qubit: PRO(1)}, ar=gate2zx_new, ob_factory=PRO, ar_factory=Diagram)

circuit = (Rx(x) @ Id(1)) >> CX
drawing.equation(circuit, circuit2zx(circuit), circuit2zx(circuit).grad(x), figsize=(9, 2), symbol="---->")
```

## Doubling via the CPM construction

```
swaps = Id(2) @ SWAP >> Id(1) @ SWAP @ Id(1)
doubled_circuit = swaps[::-1] >> Id(1) @ Rx(-x) @ Rx(x) @ Id(1) >> XC @ CX >> swaps
drawing.equation(doubled_circuit, circuit2zx(doubled_circuit), symbol="--- ZX -->", figsize=(15, 4))
```

Both `Circuit`s and `zx.Diagram`s can be differentiated.

```
doubled_circuit.grad(x, mixed=False).draw(figsize=(12, 4))
circuit2zx(doubled_circuit).grad(x).draw(figsize=(12, 3))
```

Differentiating a circuit as a doubled diagram can give an asymmetric, undoubled diagram due to the product rule. Such diagrams cannot be executed on quantum hardware.

## Differentiating Circuits

Just as we defined the derivatives of the ZX `Spider`s in terms of `Spider`s, the generators of ZX, we need to define the derivatives of the parameterised `QuantumGate`s in terms of `QuantumGate`s.

From Schuld et al., the parameter-shift rule for `Rz` is given by $\partial R_z(\theta) = \frac{1}{2} [R_z(\theta + \frac{\pi}{2}) - R_z(\theta - \frac{\pi}{2})]$.

```
drawing.equation(Rz(x).bubble(drawing_name="circ ∂"), Rz(x).grad(x), figsize=(12, 4))

drawing.equation(Rz(x).bubble(drawing_name="double").bubble(drawing_name="diag ∂"),
                 (Rz(-x) @ Rz(x)).grad(x, mixed=False), figsize=(12, 4))
```

Bear in mind that the previous equation is an equation on circuits, and this one is an equation on linear maps.

## Checks for Diagrams

We check that the diagrammatic derivative equals the derivative computed with sympy.

```
import numpy as np

def _to_square_mat(m):
    m = np.asarray(m).flatten()
    return m.reshape(2 * (int(np.sqrt(len(m))), ))

def test_rot_grad():
    from sympy.abc import phi
    import sympy as sy

    for gate in (Rx, Ry, Rz, CU1, CRx, CRz):
        # Compare the grad discopy vs sympy
        op = gate(phi)
        d_op_sym = sy.Matrix(_to_square_mat(op.eval().array)).diff(phi)
        d_op_disco = sy.Matrix(
            _to_square_mat(op.grad(phi, mixed=False).eval().array))
        diff = sy.simplify(d_op_disco - d_op_sym).evalf()
        assert np.isclose(float(diff.norm()), 0.)

test_rot_grad()
```

## Checks for Circuits

We check that the gradient of the circuit equals the diagrammatic derivative of the doubled diagram.

```
def test_rot_grad_mixed():
    from sympy.abc import symbols
    from sympy import Matrix

    z = symbols('z', real=True)
    random_values = [0., 1., 0.123, 0.321, 1.234]

    for gate in (Rx, Ry, Rz):
        cq_shape = (4, 4)
        v1 = Matrix((gate(z).eval().conjugate() @ gate(z).eval())
                    .array.reshape(*cq_shape)).diff(z)
        v2 = Matrix(gate(z).grad(z).eval(mixed=True).array.reshape(*cq_shape))
        for random_value in random_values:
            v1_sub = v1.subs(z, random_value).evalf()
            v2_sub = v2.subs(z, random_value).evalf()
            difference = (v1_sub - v2_sub).norm()
            assert np.isclose(float(difference), 0.)

test_rot_grad_mixed()

circuit = Ket(0, 0) >> H @ Rx(phi) >> CX >> Bra(0, 1)
gradient = (circuit >> circuit[::-1]).grad(phi, mixed=False)
drawing.equation(circuit, gradient, symbol="|-->", figsize=(15, 4))

x = np.arange(0, 1, 0.05)
y = np.array([circuit.lambdify(phi)(i).eval(mixed=True).array.imag for i in x])
dy = np.array([gradient.lambdify(phi)(i).eval(mixed=False).array.real for i in x])

plt.subplot(2, 1, 1)
plt.plot(x, y)
plt.ylabel("Amplitude")
plt.subplot(2, 1, 2)
plt.plot(x, dy)
plt.ylabel("Gradient")
```

## Bonus: Finding the exponent of a gate using Stone's theorem

A one-parameter unitary group is a unitary matrix $U: n \rightarrow n$ in $\operatorname{Mat}_{\mathbb{R} \rightarrow \mathbb{C}}$ with $U(0)=\mathrm{id}_{n}$ and $U(t) U(s)=U(s+t)$ for all $s, t \in \mathbb{R}$. It is strongly continuous when $\lim _{t \rightarrow t_{0}} U(t)=U\left(t_{0}\right)$ for all $t_{0} \in \mathbb{R}$.

A one-parameter diagram $d: x^{\otimes n} \rightarrow x^{\otimes n}$ is said to be a unitary group when its interpretation $[[d]]$ is.

**Stone's Theorem**: There is a one-to-one correspondence between strongly continuous one-parameter unitary groups and self-adjoint matrices.
The bijection is given explicitly by

$$H \mapsto \exp (i t H) \quad \text{ and } \quad U \mapsto -i\,(\partial U)(0)$$

A small numerical check of this correspondence is sketched after the future-work list below.

![image.png](attachment:image.png)

## Future Work

* Completing the discopy codebase for QML
* Solving differential equations
* Keeping derivatives of ZX in ZX, rather than sums of ZX
* Formulating diagrammatic differentiation for Boolean circuits
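As a standalone numerical illustration of Stone's theorem (a sketch using numpy/scipy, independent of discopy): take a self-adjoint $H$, build the one-parameter group $U(t) = \exp(itH)$, and recover $H$ as $-i(\partial U)(0)$ with a central finite difference.

```
import numpy as np
from scipy.linalg import expm

# A self-adjoint matrix and the one-parameter unitary group it generates.
H = np.array([[1.0, 2.0], [2.0, -1.0]])
U = lambda t: expm(1j * t * H)

# Recover H = -i * (dU/dt)(0) by central differences.
dt = 1e-6
H_recovered = -1j * (U(dt) - U(-dt)) / (2 * dt)
print(np.allclose(H, H_recovered))  # True up to finite-difference error
```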
# Imports

```
# OS interaction
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

# Data Manipulation
import pandas as pd

# Pandas Settings
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

# Linear Algebra
import numpy as np

# Data Visualization
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns

# DateTime Manipulation
import datetime as dt
import pytz
from pandas.tseries.holiday import *
from pandas.tseries.offsets import DateOffset
from dateutil.relativedelta import *

project_dir = os.path.abspath('..')
data_path = '/data/detroit_911_calls_cleaned.csv'
csv_path = project_dir + data_path
df = pd.read_csv(csv_path)
```

# Load Data

```
print(df.shape)
df.head(3)
```

#### Initial Filtering

The first logical step is to keep only the columns we will need for this project. The most obvious, of course, are X and Y, our longitude and latitude coordinates, along with the call timestamp, priority and call description. The rest of the columns are unnecessary for our purposes.

```
pertinent_cols = [
    'X',
    'Y',
    'call_timestamp',
    'calldescription',
    'priority'
]

df2 = df[pertinent_cols]

# correct calldescription header to fit convention
df2 = df2.rename(columns={'calldescription': 'call_description'})
df2.head(3)
```

### Secondary Filtering

If we examine the call_description column, we will find that not all observations relate to 911 responses; some represent administrative functions.

```
df2.call_description.value_counts()[:20]
```

There are two call descriptions describing non-police functions, 'START OF SHIFT INFORMATION' and 'REMARKS'. So let's define our dataframe to include all the observations except those where the call description contains one of these values.

```
admin_calls = df2.call_description.value_counts()[3:5]
admin_calls = list(df2.call_description.value_counts()[3:5].index)
print(admin_calls)

# Lets take out the crank calls too
removed_calls = set(admin_calls + ['HANGUP CALLS'])

df3 = df2.loc[~df2['call_description'].isin(removed_calls)]
print(df3.shape)
df3.head(3)
```

# DateTime

The first thing to be done here is to parse the call timestamps and encode each aspect of the DateTime information as a separate column. We will also notice that the timestamps use UTC, so it makes sense to localize the timezone as we move forward.

```
def extract_dt_cols(df: pd.DataFrame, col: str, drop_original: bool = True) -> pd.DataFrame:
    """Extracts datetime features from a series of timestamps

    Arguments:
        df {pd.DataFrame} -- The pandas dataframe containing the timestamp feature
        col {str} -- The name of the column
        drop_original {boolean} -- default is True, drops column passed in col argument from the dataframe

    Returns:
        pd.DataFrame -- DataFrame with the extracted features appended
    """
    df = df.copy()
    df[col] = pd.to_datetime(df[col])
    # localize the data
    df[col] = df[col].dt.tz_convert('America/Detroit')
    df['year'] = df[col].dt.year
    df['month'] = df[col].dt.month
    df['day'] = df[col].dt.day
    df['dow'] = df[col].dt.dayofweek
    df['week'] = df[col].dt.week
    df['hour'] = df[col].dt.hour
    if drop_original is True:
        df = df.drop(columns=col)
    return df


def get_day_part(df: pd.DataFrame, hour_col: str) -> pd.DataFrame:
    """Extracts the time of day from the hour value by dividing by the hour knife
        1 = Morning (0400 - 1000h)
        2 = Midday (1000 - 1600h)
        3 = Evening (1600 - 2200h)
        4 = Night (2200 - 0400h)

    Arguments:
        df {pd.DataFrame} -- dataframe containing the hour column
        hour_col {str} -- name of the hour column

    Returns:
        pd.DataFrame -- pandas dataframe with the new column appended
    """
    df = df.copy()
    hour_knife = 6
    df['part_of_day'] = ((df[hour_col] + 2) / hour_knife).astype(int)
    df['part_of_day'] = df['part_of_day'].replace(0, 4)
    return df


class HolidayCalendar(AbstractHolidayCalendar):
    rules = [
        Holiday('NewYearsDay', month=1, day=1, observance=nearest_workday),
        USMartinLutherKingJr,
        Holiday('SuperBowl', month=2, day=1, offset=DateOffset(weekday=SU(1))),
        USPresidentsDay,
        Holiday('StPatricksDay', month=3, day=17),
        GoodFriday,
        Holiday('Easter', month=1, day=1, offset=Easter()),
        USMemorialDay,
        Holiday('USIndependenceDay', month=7, day=4, observance=nearest_workday),
        USLaborDay,
        Holiday('Halloween', month=10, day=31),
        USThanksgivingDay,
        Holiday('Christmas', month=12, day=25, observance=nearest_workday),
        Holiday('NewYearsEve', month=12, day=31, observance=nearest_workday)
    ]

    def get_holidays(start, end):
        """
        Returns an index of holidays from HolidayCalendar

        args:
            start = str in YYYY/MM/DD format
                if month or day is not specified then defaults to 1
            end = str in YYYY/MM/DD format
                if month or day is not specified then defaults to 1
        """
        inst = HolidayCalendar()
        sy = pd.to_datetime(start).year
        sm = pd.to_datetime(start).month
        sd = pd.to_datetime(start).day
        ey = pd.to_datetime(end).year
        em = pd.to_datetime(end).month
        ed = pd.to_datetime(end).day
        holidays = inst.holidays(dt.datetime(sy, sm, sd), dt.datetime(ey, em, ed))
        return holidays


def calendar_as_dataframe(index, col='date'):
    df = pd.DataFrame(index, columns=[col])
    df['year'] = df[col].dt.year
    df['month'] = df[col].dt.month
    df['day'] = df[col].dt.day
    df = df.drop(columns=col)
    return df


def timestamp_wrangler(df: pd.DataFrame) -> pd.DataFrame:
    """Extracts datetime features, the part of day and a holiday flag from call_timestamp."""
    # Extract timestamp columns
    df = extract_dt_cols(
        df=df,
        col='call_timestamp'
    )
    # Determine call part of day
    df = get_day_part(
        df=df,
        hour_col='hour'
    )
    # Get DataFrame of Holidays and events
    holidays = calendar_as_dataframe(
        HolidayCalendar.get_holidays(
            start='2016/9/20',
            end='2020/02/24')
    )
    holidays['is_holiday'] = 1
    # Create calendar as DataFrame of date ranges
    calendar = calendar_as_dataframe(
        pd.date_range(
            start='2016/9/20',
            end='2019/02/24')
    )
    # Merge calendar and holiday dataframes
    merged = pd.merge(
        calendar,
        holidays,
        how='left'
    )
    # Add is_holiday feature to main dataframe
    df = pd.merge(
        df,
        merged,
        how='left',
        on=[
            'year',
            'month',
            'day'
        ]
    )
    df['is_holiday'].fillna(0, inplace=True)
    return df


df4 = timestamp_wrangler(df3).drop_duplicates()
df4
```

***
## Geographic Features
***

#### Let's plot the latitude and longitude coordinates and take a look to see if there might be any issues there

```
geo_sample = df4[['X', 'Y']].sample(100000, random_state=42)

sns.scatterplot(
    data=geo_sample,
    x='X',
    y='Y');
```

```
!ls
```
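Going back to `get_day_part` defined above: the `(hour + 2) // 6` mapping (with 0 replaced by 4) is easy to sanity-check. The following is a small standalone sketch, not part of the original notebook, that tabulates which hours fall in each bin; it should reproduce the ranges listed in the docstring.

```
import pandas as pd

# Which hours land in which part_of_day bin, mirroring get_day_part's formula.
hours = pd.DataFrame({'hour': range(24)})
hours['part_of_day'] = ((hours['hour'] + 2) / 6).astype(int).replace(0, 4)
print(hours.groupby('part_of_day')['hour'].apply(list))
```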
We are checking the chain rule for differentiation.

Let us check the function:

$$ f(x) = \tanh( w_2 ( \tanh (w_1 x) ) ) $$

By the chain rule, this can be differentiated factor by factor; we shall write $d$ for the derivative of the $\tanh$ function, i.e. $d(h) = 1 - \tanh^2(h)$.

```
import numpy as np
```

Function composition: given a list of unary functions, can a longer function be generated?

```
np.tan(np.sin(np.cos(0.5)))

from functools import reduce

fns = [np.tan, np.sin, np.cos]
fns1 = [np.cos, np.sin, np.tan]
x = reduce(lambda f, f1: lambda m: f1(f(m)), fns1, lambda m: m)
x(0.5)

def x(n):
    return lambda m: n*m

fns = [x(5), np.tanh, x(3), np.tanh]

def fnAll(fnList, x, verbose=False):
    result = x
    for i, f in enumerate(fnList):
        if verbose:
            print('[{:05d}] --> {}'.format(i, result))
        result = f(result)
    i += 1
    if verbose:
        print('[{:05d}] --> {}'.format(i, result))
    return result

fnAll(fns, 2, verbose=True)

def fdDeltaX(fnList, x):
    # Central finite difference of the composed function.
    deltaX = 1e-5
    result = (fnAll(fnList, x + deltaX) - fnAll(fnList, x - deltaX)) / (2 * deltaX)
    return result

fdDeltaX(fns, 0.1)
```

## Simulate the scalar case

In this case, we shall look at the following:

$$ f(x) = \tanh( w_2 \tanh ( w_1 x ) ) $$

The differentiation can be viewed as:

$$ f'(x) = [1][ w_1 ][ d( w_1 x ) ][ w_2 ][ d( w_2 \tanh ( w_1 x ) ) ] $$

Note that here,

$$ d(x) = \frac {d(\tanh(x))} {dx} = 1 - \tanh^2(x) $$

Associated function lists are:

```python
fns = [x(5), np.tanh, x(3), np.tanh]
fDiff = [dx(5), dTanh, dx(3), dTanh]
```

```
def x(n):
    return lambda m: n*m

def dTanh(x):
    return 1 - np.tanh(x)**2

def dx(n):
    return lambda m: n

fns = [x(5), np.tanh, x(3), np.tanh]
fDiff = [dx(5), dTanh, dx(3), dTanh]

def fnAll(fnList, fDiff, x, verbose=False):
    result = x
    dResult = 1
    for i, f in enumerate(fnList):
        if verbose:
            print('[{:05d}] --> {} {}'.format(i, result, dResult))
        dResult *= fDiff[i](result)
        result = f(result)
    i += 1
    if verbose:
        print('[{:05d}] --> {} {}'.format(i, result, dResult))
    return result, dResult

fnAll(fns, fDiff, 0.1, verbose=True)
```

## Simulate the vector case

Remember that in this case we are dealing with partial derivatives:

```
N = 4
i = 2
xn = np.random.rand(N).reshape((-1, 1))
dxn = np.zeros(xn.shape); dxn[i, 0] = 1

def V(M):
    '''
    M = a matrix of shape (m, n)

    Returns
    -------
    A function that takes a vector of shape (n, 1) and returns a vector of shape (m, 1)
    '''
    return lambda m: np.matmul(M, m)

def dTanh(x):
    return 1 - np.tanh(x)**2

A = np.random.rand(5, 4)
B = np.random.rand(2, 5)
C = np.random.rand(1, 2)

fns = [(V(A), np.tanh), (V(B), np.tanh), (V(C), np.tanh)]
fDiff = [(V(A), dTanh), (V(B), dTanh), (V(C), dTanh)]

def fnAll(fnList, fDiff, xn, dxn, verbose=False):
    result = xn.copy()
    dResult = dxn.copy()
    if verbose:
        print('[{:05d}] result: {} | {}'.format(-1, result.T, dResult.T))
    for i, (W, a) in enumerate(fnList):
        result = W(result)
        W1, a1 = fDiff[i]
        dResult = W1(dResult)
        dResult *= a1(result)
        if verbose:
            print('[{:05d}] result: {} | {}'.format(i, result.T, dResult.T))
        result = a(result)
        if verbose:
            print('[{:05d}] result: {} | {}'.format(i, result.T, dResult.T))
    return result

fnAll(fns, fDiff, xn, dxn, verbose=True)

delXn = 1e-10
xn1 = xn.copy()
xn1[i, 0] += delXn
(fnAll(fns, fDiff, xn1, dxn) - fnAll(fns, fDiff, xn, dxn)) / delXn
```
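For the concrete scalar lists used above, the composed function is $f(x) = \tanh(3\tanh(5x))$, so the accumulated `dResult` can be cross-checked against the closed-form derivative. This is a standalone sketch, not part of the original notebook:

```
import numpy as np

# Closed-form derivative of f(x) = tanh(3 * tanh(5 * x)), matching fns = [x(5), tanh, x(3), tanh].
def f_prime(x0):
    inner = np.tanh(5 * x0)
    return 5 * (1 - inner**2) * 3 * (1 - np.tanh(3 * inner)**2)

# Chain-rule accumulation, mirroring the scalar fnAll above.
def chain(x0):
    fns_local = [lambda m: 5 * m, np.tanh, lambda m: 3 * m, np.tanh]
    dfs_local = [lambda m: 5, lambda m: 1 - np.tanh(m)**2,
                 lambda m: 3, lambda m: 1 - np.tanh(m)**2]
    result, d_result = x0, 1.0
    for f, df in zip(fns_local, dfs_local):
        d_result *= df(result)
        result = f(result)
    return result, d_result

x0 = 0.1
print(chain(x0)[1], f_prime(x0), np.isclose(chain(x0)[1], f_prime(x0)))
```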
# Fifth order methods & Lotka-Volterra problem

The extensisq methods of order 5 are compared to the explicit Runge-Kutta methods of scipy on the Lotka-Volterra problem (predator-prey model). This problem was copied from the solve_ivp page in scipy's reference manual.

## Problem definition

The parameters of this problem are defined as additional arguments (`args`) to the derivative function.

```
def lotkavolterra(t, z, a, b, c, d):
    x, y = z
    return [a*x - b*x*y, -c*y + d*x*y]

problem = {'fun': lotkavolterra,
           'y0': [10., 5.],
           't_span': [0., 15.],
           'args': (1.5, 1, 3, 1)}
```

## Reference solution

First, a reference solution is created by solving this problem with a low tolerance.

```
from scipy.integrate import solve_ivp
reference = solve_ivp(**problem, atol=1e-12, rtol=1e-12, method='DOP853', dense_output=True)
```

## Solution plot

This solution and its derivatives change rapidly.

```
%matplotlib inline
import matplotlib.pyplot as plt

plt.figure()
plt.plot(reference.t, reference.y.T)
plt.title('Lotka-Volterra')
plt.legend(('prey', 'predator'))
plt.show()
```

## Efficiency plot

A method is efficient if it can solve a problem to low error at low cost. I will use the number of function evaluations as a measure of cost. For the error measure I will use the RMS of the error norm over all solution points. A function to calculate it is:

```
def rms_err_norm(solution, reference):
    error = solution.y - reference.sol(solution.t)
    err_norm = (error**2).mean()**0.5
    return err_norm
```

Now let's solve this problem with the explicit Runge-Kutta methods of scipy (`RK45` and `DOP853`) and those of extensisq (`Ts45`, `BS45`, `BS45_i`, `CK45` and `CK45_o`) at a few absolute tolerance values and make a plot to compare their efficiency. The bottom left corner of that plot is the efficiency sweet spot: low error and few function evaluations.

```
import numpy as np
from extensisq import *

methods = ['RK45', 'DOP853', Ts45, BS45, BS45_i, CK45, CK45_o]
tolerances = np.logspace(-4, -9, 6)

plt.figure()
for method in methods:
    name = method if isinstance(method, str) else method.__name__
    e = []
    n = []
    for tol in tolerances:
        sol = solve_ivp(**problem, rtol=1e-13, atol=tol, method=method,
                        dense_output=True)    # only to separate BS45 and BS45_i
        err = rms_err_norm(sol, reference)
        e.append(err)
        n.append(sol.nfev)
    if name == 'RK45':
        style = '--k.'
    elif name == 'DOP853':
        style = '-k.'
    else:
        style = '.:'
    plt.loglog(e, n, style, label=name)
plt.legend()
plt.xlabel(r'||error||$_{RMS}$')
plt.ylabel('nr of function evaluations')
plt.title('efficiency')
plt.show()
```

## Discussion

The efficiency graph shows:
* `RK45` has the poorest efficiency of all considered methods.
* `Ts45` is quite similar to `RK45`, but just a bit better.
* `BS45` and `BS45_i` are the most efficient fifth order methods at lower (tighter) tolerances. These two methods have exactly the same accuracy, but `BS45` needs more evaluations for its accurate interpolant. That interpolant is not used in this case; it was only enabled, by setting `dense_output=True`, to show the difference with respect to `BS45_i` (see the short check below).
* `CK45` and `CK45_o` are the most efficient methods at higher (looser) tolerances. Their performance at lower tolerances is similar to `Ts45`.
* `DOP853` is a higher order method (eighth). Typically, it is more efficient at lower tolerances, but for this problem and these tolerances it does not work so well.

These observations may not be valid for other problems.
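A quick way to see the interpolant cost mentioned in the discussion is to compare the evaluation counts with and without dense output. This is a sketch reusing the `problem` dict and the extensisq methods imported above; the exact numbers depend on the installed versions.

```
# Function-evaluation counts for BS45 vs BS45_i, with and without dense output.
for dense in (False, True):
    for method in (BS45, BS45_i):
        sol = solve_ivp(**problem, rtol=1e-13, atol=1e-7, method=method,
                        dense_output=dense)
        print(f'{method.__name__:7s} dense_output={dense}: nfev={sol.nfev}')
```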
```
import torch
from torch import nn
import torchvision
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader
from pathlib import Path
import pandas as pd

import sys
sys.path.append("..")

from video_classification.datasets import FolderOfFrameFoldersDataset, FrameWindowDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

ROOT = Path("/home/ubuntu/SupervisedVideoClassification")
DATA_ROOT = Path(ROOT/"data")

valid_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

valid_ds = FolderOfFrameFoldersDataset(DATA_ROOT/'validation', transform=valid_transforms,
                                       base_class=FrameWindowDataset, window_size=3, overlapping=True,)

from torch import nn
from torchvision.models import resnet101
from video_classification.models.mlp import MLP

class SingleImageResNetModel(nn.Module):
    def __init__(self, mlp_sizes=[768, 128, 2]):
        super().__init__()
        resnet = resnet101(pretrained=True)
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.clf = MLP(2048, mlp_sizes)
        self.freeze_resnet()

    def forward(self, x):
        x = self.resnet(x).squeeze()
        x = self.clf(x)
        return x

    def freeze_resnet(self):
        for p in self.resnet.parameters():
            p.requires_grad = False

    def unfreeze_resnet(self):
        for p in self.resnet.parameters():
            p.requires_grad = True

import torch
from torch import nn
from video_classification.models.mlp import MLP

class MultiImageModel(nn.Module):
    def __init__(self, window_size=3, single_mlp_sizes=[768, 128], joint_mlp_sizes=[64, 2]):
        super().__init__()
        self.window_size = window_size
        self.single_mlp_sizes = single_mlp_sizes
        self.joint_mlp_sizes = joint_mlp_sizes
        self.single_image_model = SingleImageResNetModel(self.single_mlp_sizes)
        self.in_features = self.single_mlp_sizes[-1] * self.window_size
        self.clf = MLP(self.in_features, joint_mlp_sizes)

    def forward(self, x):
        # x is of size [B, T, C, H, W]. In other words, a batch of windows.
        # Each image of the same window goes through the single-image model.
        x = x.transpose(0, 1)  # -> [T, B, C, H, W]
        x = torch.cat([self.single_image_model(window) for window in x], 1)
        # x is now of size [B, T * single_mlp_sizes[-1]]
        x = self.clf(x)
        # Now size is [B, joint_mlp_sizes[-1]] which should always be 2
        return x

    def freeze_single_image_model(self):
        # Freeze the whole single-image (ResNet + MLP) model
        for p in self.single_image_model.parameters():
            p.requires_grad = False

    def unfreeze_single_image_model(self):
        # Unfreeze only the MLP head of the single-image model.
        # Training the whole ResNet backbone is a no-go, so we only train the classifier part.
        for p in self.single_image_model.clf.parameters():
            p.requires_grad = True

best_single_image_model = SingleImageResNetModel(mlp_sizes=[1024, 256, 2])
best_single_image_model.load_state_dict(torch.load(ROOT/"checkpoints/single_frame_resnet_SingleImageResNetModel_39_f1=0.8797445.pth"))

best_triple_image_model = MultiImageModel(
    window_size=3,
    single_mlp_sizes=[1024, 256],
    joint_mlp_sizes=[128, 2])
best_triple_image_model.load_state_dict(torch.load(ROOT/"checkpoints/multi_frame_resnet101_from_scratch_MultiImageModel_33_f1=0.8941237.pth"))

best_single_image_model = best_single_image_model.to(device)
best_triple_image_model = best_triple_image_model.to(device)

x = torch.stack([valid_ds[0][0], valid_ds[1][0], valid_ds[2][0], valid_ds[3][0]]).to(device)
```

# Error Analysis

```
"""
from tqdm import tqdm
import numpy as np

valid_loader = DataLoader(valid_ds, batch_size=128, shuffle=False)

single_img_probs = []
triple_img_probs = []
y_true = []

with torch.no_grad():
    for i, (x, y) in enumerate(tqdm(valid_loader)):
        x = x.to(device)  # B, T, C, H, W
        single_img_batch_pred = torch.softmax(best_single_image_model(x[:, -1, :, :, :]), dim=-1).cpu().tolist()
        triple_img_batch_pred = torch.softmax(best_triple_image_model(x), dim=-1).cpu().tolist()
        batch_true = y.tolist()
        single_img_probs.extend((y for y in single_img_batch_pred))
        triple_img_probs.extend((y for y in triple_img_batch_pred))
        y_true.extend((y for y in batch_true))

single_img_probs = np.array(single_img_probs)
single_img_pred = np.argmax(single_img_probs, 1)

triple_img_probs = np.array(triple_img_probs)
triple_img_pred = np.argmax(triple_img_probs, 1)

y_true = np.array(y_true)

import pandas as pd
df = pd.DataFrame.from_dict({
    'single_prob': single_img_probs[:, 1].tolist(),
    'single_pred': single_img_pred.tolist(),
    'triple_prob': triple_img_probs[:, 1].tolist(),
    'triple_pred': triple_img_pred.tolist(),
    'y_true': y_true,
})
"""
```

Already computed, no point in stressing the GPU.

```
df = pd.read_csv("resnet_error_analysis.csv")
df.head()

# df.to_csv("resnet_error_analysis.csv", index=False)

triple_better = df.query("triple_pred == y_true and single_pred != y_true")
triple_better.query("y_true == 1")

valid_ds = FolderOfFrameFoldersDataset(DATA_ROOT/'validation', transform=transforms.ToTensor(),
                                       base_class=FrameWindowDataset, window_size=3, overlapping=True,)

def to_pil(img_num):
    to_pil_f = transforms.ToPILImage()
    return to_pil_f(valid_ds[img_num][0][-1])

df.iloc[178:198]
df.iloc[2165:2175]
```

This ad-hoc analysis does not seem to yield any visible patterns: Triple does better than Single on seemingly random images. To reduce our search space, we should filter down to cases where the Triple model makes at least 3 successful predictions in a row, and first compare against the rows for which Single makes 0 (or just 1) correct predictions, and take it from there. Maybe `pd.rolling` can be of use for this?
``` df.head() df['triple_win_sum'] = df['triple_pred'].rolling(window=3).sum() df['single_win_sum'] = df['single_pred'].rolling(window=3).sum() df.head() triple_better = df.query("triple_pred == y_true and triple_win_sum >=3 and single_pred != y_true") triple_better.shape triple_better single_better = df.query("single_pred == y_true and single_win_sum >=3 and triple_pred != y_true") single_better.shape single_better single_worst = df.query("single_pred != y_true and triple_pred == y_true") single_worst_sorted = single_worst.sort_values(by='single_prob') print(single_worst_sorted.shape) single_worst_sorted.head(10) triple_worst = df.query("triple_pred != y_true and single_pred == y_true") triple_worst_sorted = triple_worst.sort_values(by='triple_prob') print(triple_worst_sorted.shape) triple_worst_sorted.head(10) triple_worst = df.query("triple_pred != y_true and single_pred != y_true") triple_worst_sorted = triple_worst.sort_values(by='single_prob') print(triple_worst_sorted.shape) triple_worst_sorted.head(10) triple_better = df.query("triple_pred == y_true and triple_win_sum >=3 and single_pred != y_true") triple_better_sorted = triple_better.sort_values(by='triple_prob', ascending=False) print(triple_better_sorted.shape) triple_better_sorted.head(10) single_better = df.query("single_pred == y_true and single_win_sum >=3 and triple_pred != y_true") single_better_sorted = single_better.sort_values(by='single_prob', ascending=False) print(single_better_sorted.shape) single_better_sorted.head(10) both_confused = df.query(" 0.4 <= single_prob <= 0.6 and 0.4 <= triple_prob <= 0.6 and (single_pred != y_true and triple_pred != y_true)") both_confused_sorted = both_confused.sort_values(by='triple_prob') print(both_confused_sorted.shape) both_confused_sorted.head(10) ```
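Note that `triple_win_sum` and `single_win_sum` above are rolling sums of the predicted labels, i.e. streaks of class-1 predictions. If the intent is a streak of *correct* predictions, as described before these cells, one possible variant (a sketch using the same `df`) is to roll over a correctness indicator instead:

```
# Rolling count of correct predictions over a 3-frame window, per model.
df['triple_correct_streak'] = (df['triple_pred'] == df['y_true']).astype(int).rolling(window=3).sum()
df['single_correct_streak'] = (df['single_pred'] == df['y_true']).astype(int).rolling(window=3).sum()

# Windows where Triple was right 3 times in a row but Single got the current frame wrong.
triple_better_strict = df.query("triple_correct_streak == 3 and single_pred != y_true")
print(triple_better_strict.shape)
triple_better_strict.head(10)
```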
<a href="https://colab.research.google.com/github/krakowiakpawel9/machine-learning-bootcamp/blob/master/unsupervised/02_dimensionality_reduction/02_pca_examples.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> * @author: [email protected] * @site: e-smartdata.org ### scikit-learn Strona biblioteki: [https://scikit-learn.org](https://scikit-learn.org) Dokumentacja/User Guide: [https://scikit-learn.org/stable/user_guide.html](https://scikit-learn.org/stable/user_guide.html) Podstawowa biblioteka do uczenia maszynowego w języku Python. Aby zainstalować bibliotekę scikit-learn, użyj polecenia poniżej: ``` !pip install scikit-learn ``` Aby zaktualizować do najnowszej wersji bibliotekę scikit-learn, użyj polecenia poniżej: ``` !pip install --upgrade scikit-learn ``` Kurs stworzony w oparciu o wersję `0.22.1` ### Spis treści: 1. [Import bibliotek](#0) 2. [Załadowanie danych - breast cancer](#1) 3. [Standaryzacja](#2) 4. [PCA - 2 komponenty](#3) 5. [PCA - 3 komponenty](#4) 6. [Zbiór danych MNIST](#5) 7. [Zbiór danych Cifar](#6) ### <a name='0'></a> Import bibliotek ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.graph_objects as go import plotly.express as px np.set_printoptions(precision=4, suppress=True, linewidth=150) ``` ### <a name='1'></a> Załadowanie danych - breast cancer ``` from sklearn.datasets import load_breast_cancer raw_data = load_breast_cancer() all_data = raw_data.copy() data = all_data['data'] target = all_data['target'] data[:3] target[:30] data.shape ``` ### <a name='2'></a> Standaryzacja ``` from sklearn.preprocessing import StandardScaler scaler = StandardScaler() data_std = scaler.fit_transform(data) data_std[:3] ``` ### <a name='3'></a> PCA - 2 komponenty ``` from sklearn.decomposition import PCA pca = PCA(n_components=2) data_pca = pca.fit_transform(data_std) data_pca[:5] pca_2 = pd.DataFrame(data={'pca_1': data_pca[:, 0], 'pca_2': data_pca[:, 1], 'class': target}) pca_2.replace(0, 'Benign', inplace=True) pca_2.replace(1, 'Malignant', inplace=True) pca_2.head() results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 2 components', width=950, template='plotly_dark')) fig.show() px.scatter(pca_2, 'pca_1', 'pca_2', color=pca_2['class'], width=950, template='plotly_dark') ``` ### <a name='4'></a> PCA - 3 komponenty ``` pca = PCA(n_components=3) data_pca = pca.fit_transform(data_std) data_pca[:5] pca_3 = pd.DataFrame(data={'pca_1': data_pca[:, 0], 'pca_2': data_pca[:, 1], 'pca_3': data_pca[:, 2], 'class': target}) pca_3.replace(0, 'Benign', inplace=True) pca_3.replace(1, 'Malignant', inplace=True) pca_3.head() results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 components', width=950, 
template='plotly_dark')) fig.show() px.scatter_3d(pca_3, x='pca_1', y='pca_2', z='pca_3', color='class', symbol='class', opacity=0.7, size_max=10, width=950, template='plotly_dark') ``` ### <a name='5'></a> Zbiór danych MNIST ``` from keras.datasets import mnist (X_train, y_train), (X_test, y_test) = mnist.load_data() print(f'X_train shape: {X_train.shape}') print(f'X_test shape: {X_test.shape}') print(f'y_train shape: {y_train.shape}') print(f'y_test shape: {y_test.shape}') ``` Obcięcie danych do pierwszych 5000 zdjęć ``` X_train = X_train[:5000] y_train = y_train[:5000] X_train[0] y_train[:5] ``` Wizualizacja danych ``` plt.figure(figsize=(12, 8)) for i in range(8): plt.subplot(240 + i + 1) plt.imshow(X_train[i], cmap='gray_r') plt.title(y_train[i], color='white', fontsize=17) plt.axis('off') plt.show() ``` Standaryzacja ``` X_train = X_train / 255. X_test = X_test / 255. X_train.shape ``` Wypłaszczenie obrazów ``` X_train = X_train.reshape(-1, 28 * 28) X_train.shape ``` PCA - 3 komponenty ``` pca = PCA(n_components=3) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] ``` Wyjaśniona wariancja ``` results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 komponenty', width=950, template='plotly_dark')) fig.show() X_train_pca_df = pd.DataFrame(np.c_[X_train_pca, y_train], columns=['pca_1', 'pca_2', 'pca_3', 'class']) X_train_pca_df['class'] = X_train_pca_df['class'].astype('str') X_train_pca_df.head() ``` Wizualizacja 3D głównych komponentów ``` px.scatter_3d(X_train_pca_df, x='pca_1', y='pca_2', z='pca_3', color='class', symbol='class', opacity=0.7, size_max=10, width=950, height=700, template='plotly_dark', title='PCA - MNIST dataset') pca = PCA(n_components=0.95) X_train_pca = pca.fit_transform(X_train) X_train_pca[:1] pca.n_components_ ``` Wyjaśniona wariancja ``` results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 komponenty', width=950, template='plotly_dark')) fig.show() ``` ### <a name='6'></a> Zbiór danych Cifar ``` from keras.datasets import cifar10 (X_train, y_train), (X_test, y_test) = cifar10.load_data() print(f'X_train shape: {X_train.shape}') print(f'X_test shape: {X_test.shape}') print(f'y_train shape: {y_train.shape}') print(f'y_test shape: {y_test.shape}') ``` Obcięcie do pierwszych 5000 obrazów ``` X_train = X_train[:5000] y_train = y_train[:5000] X_train[0].shape y_train[:5] ``` Wizualizacja ``` targets = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} plt.imshow(X_train[1]) plt.title(targets[y_train[1][0]], color='white', fontsize=17) plt.axis('off') plt.show() plt.figure(figsize=(12, 8)) for i in range(8): plt.subplot(240 + i + 1) plt.imshow(X_train[i]) plt.title(targets[y_train[i][0]], color='white', fontsize=17) 
plt.axis('off') plt.show() ``` Standaryzacja ``` X_train = X_train / 255. X_test = X_test / 255. X_train.shape ``` Wypłaszczenie obrazów ``` X_train = X_train.reshape(-1, 32 * 32 * 3) X_train.shape X_train[:5] ``` PCA - 3 komponenty ``` pca = PCA(n_components=3) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] ``` Wyjaśniona wariancja ``` results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 components', width=950, template='plotly_dark')) fig.show() X_train_pca_df = pd.DataFrame(np.c_[X_train_pca, y_train], columns=['pca_1', 'pca_2', 'pca_3', 'class']) X_train_pca_df['name'] = X_train_pca_df['class'].map(targets) X_train_pca_df['class'] = X_train_pca_df['class'].astype('str') X_train_pca_df.head() ``` Wizualizacja 3D głównych komponentów ``` px.scatter_3d(X_train_pca_df, x='pca_1', y='pca_2', z='pca_3', color='name', symbol='name', opacity=0.7, size_max=10, width=950, height=700, title='PCA - CIFAR dataset', template='plotly_dark') ``` PCA - 0.95% wariancji ``` pca = PCA(n_components=0.95) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] pca.n_components_ pca.explained_variance_ratio_ ``` Wariancja wyjaśniona ``` results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results.head() fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title=f'PCA - {pca.n_components_} components', width=950, template='plotly_dark')) fig.show() ```
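A supplementary sketch (not part of the notebook above): passing a float to `PCA(n_components=...)` keeps just enough components to reach that fraction of explained variance, which is what the `n_components=0.95` cells rely on. The snippet below reuses the breast-cancer data from the notebook; the reconstruction-error check at the end is an addition for illustration and the variable names are placeholders.

```
# Supplementary sketch: choose the number of components by a target explained-variance
# ratio, then measure how much information the reduced representation retains.
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X = load_breast_cancer()['data']
X_std = StandardScaler().fit_transform(X)   # PCA is scale-sensitive, so standardize first

# A float in (0, 1) keeps the smallest number of components whose cumulative
# explained variance reaches that fraction.
pca = PCA(n_components=0.95)
X_pca = pca.fit_transform(X_std)
print('components kept:', pca.n_components_)
print('cumulative explained variance:', pca.explained_variance_ratio_.cumsum()[-1])

# Map back to the original feature space and measure the information lost.
X_back = pca.inverse_transform(X_pca)
print('reconstruction MSE:', np.mean((X_std - X_back) ** 2))
```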
github_jupyter
!pip install scikit-learn !pip install --upgrade scikit-learn import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.graph_objects as go import plotly.express as px np.set_printoptions(precision=4, suppress=True, linewidth=150) from sklearn.datasets import load_breast_cancer raw_data = load_breast_cancer() all_data = raw_data.copy() data = all_data['data'] target = all_data['target'] data[:3] target[:30] data.shape from sklearn.preprocessing import StandardScaler scaler = StandardScaler() data_std = scaler.fit_transform(data) data_std[:3] from sklearn.decomposition import PCA pca = PCA(n_components=2) data_pca = pca.fit_transform(data_std) data_pca[:5] pca_2 = pd.DataFrame(data={'pca_1': data_pca[:, 0], 'pca_2': data_pca[:, 1], 'class': target}) pca_2.replace(0, 'Benign', inplace=True) pca_2.replace(1, 'Malignant', inplace=True) pca_2.head() results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 2 components', width=950, template='plotly_dark')) fig.show() px.scatter(pca_2, 'pca_1', 'pca_2', color=pca_2['class'], width=950, template='plotly_dark') pca = PCA(n_components=3) data_pca = pca.fit_transform(data_std) data_pca[:5] pca_3 = pd.DataFrame(data={'pca_1': data_pca[:, 0], 'pca_2': data_pca[:, 1], 'pca_3': data_pca[:, 2], 'class': target}) pca_3.replace(0, 'Benign', inplace=True) pca_3.replace(1, 'Malignant', inplace=True) pca_3.head() results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 components', width=950, template='plotly_dark')) fig.show() px.scatter_3d(pca_3, x='pca_1', y='pca_2', z='pca_3', color='class', symbol='class', opacity=0.7, size_max=10, width=950, template='plotly_dark') from keras.datasets import mnist (X_train, y_train), (X_test, y_test) = mnist.load_data() print(f'X_train shape: {X_train.shape}') print(f'X_test shape: {X_test.shape}') print(f'y_train shape: {y_train.shape}') print(f'y_test shape: {y_test.shape}') X_train = X_train[:5000] y_train = y_train[:5000] X_train[0] y_train[:5] plt.figure(figsize=(12, 8)) for i in range(8): plt.subplot(240 + i + 1) plt.imshow(X_train[i], cmap='gray_r') plt.title(y_train[i], color='white', fontsize=17) plt.axis('off') plt.show() X_train = X_train / 255. X_test = X_test / 255. 
X_train.shape X_train = X_train.reshape(-1, 28 * 28) X_train.shape pca = PCA(n_components=3) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 komponenty', width=950, template='plotly_dark')) fig.show() X_train_pca_df = pd.DataFrame(np.c_[X_train_pca, y_train], columns=['pca_1', 'pca_2', 'pca_3', 'class']) X_train_pca_df['class'] = X_train_pca_df['class'].astype('str') X_train_pca_df.head() px.scatter_3d(X_train_pca_df, x='pca_1', y='pca_2', z='pca_3', color='class', symbol='class', opacity=0.7, size_max=10, width=950, height=700, template='plotly_dark', title='PCA - MNIST dataset') pca = PCA(n_components=0.95) X_train_pca = pca.fit_transform(X_train) X_train_pca[:1] pca.n_components_ results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 komponenty', width=950, template='plotly_dark')) fig.show() from keras.datasets import cifar10 (X_train, y_train), (X_test, y_test) = cifar10.load_data() print(f'X_train shape: {X_train.shape}') print(f'X_test shape: {X_test.shape}') print(f'y_train shape: {y_train.shape}') print(f'y_test shape: {y_test.shape}') X_train = X_train[:5000] y_train = y_train[:5000] X_train[0].shape y_train[:5] targets = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} plt.imshow(X_train[1]) plt.title(targets[y_train[1][0]], color='white', fontsize=17) plt.axis('off') plt.show() plt.figure(figsize=(12, 8)) for i in range(8): plt.subplot(240 + i + 1) plt.imshow(X_train[i]) plt.title(targets[y_train[i][0]], color='white', fontsize=17) plt.axis('off') plt.show() X_train = X_train / 255. X_test = X_test / 255. 
X_train.shape X_train = X_train.reshape(-1, 32 * 32 * 3) X_train.shape X_train[:5] pca = PCA(n_components=3) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title='PCA - 3 components', width=950, template='plotly_dark')) fig.show() X_train_pca_df = pd.DataFrame(np.c_[X_train_pca, y_train], columns=['pca_1', 'pca_2', 'pca_3', 'class']) X_train_pca_df['name'] = X_train_pca_df['class'].map(targets) X_train_pca_df['class'] = X_train_pca_df['class'].astype('str') X_train_pca_df.head() px.scatter_3d(X_train_pca_df, x='pca_1', y='pca_2', z='pca_3', color='name', symbol='name', opacity=0.7, size_max=10, width=950, height=700, title='PCA - CIFAR dataset', template='plotly_dark') pca = PCA(n_components=0.95) X_train_pca = pca.fit_transform(X_train) X_train_pca[:5] pca.n_components_ pca.explained_variance_ratio_ results = pd.DataFrame(data={'explained_variance_ratio': pca.explained_variance_ratio_}) results['cumulative'] = results['explained_variance_ratio'].cumsum() results['component'] = results.index + 1 results.head() fig = go.Figure(data=[go.Bar(x=results['component'], y=results['explained_variance_ratio'], name='explained_variance_ratio'), go.Scatter(x=results['component'], y=results['cumulative'], name='cumulative')], layout=go.Layout(title=f'PCA - {pca.n_components_} components', width=950, template='plotly_dark')) fig.show()
0.824427
0.974725
<img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/> # MAT281 ### Aplicaciones de la Matemática en la Ingeniería ## Módulo 02 ## Laboratorio Clase 04: Agrupando datos ### Instrucciones * Completa tus datos personales (nombre y rol USM) en siguiente celda. * La escala es de 0 a 4 considerando solo valores enteros. * Debes _pushear_ tus cambios a tu repositorio personal del curso. * Como respaldo, debes enviar un archivo .zip con el siguiente formato `mXX_cYY_lab_apellido_nombre.zip` a [email protected]. * Se evaluará: - Soluciones - Código - Que Binder esté bien configurado. - Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error. * __La entrega es al final de esta clase.__ __Nombre__: Gustavo Renato Arcaya Espinosa __Rol__: 201610001-6 Se utilizará el mismo dataset de pokemon ``` import os import pandas as pd pkm = ( pd.read_csv(os.path.join("data", "pokemon.csv"), index_col="#") .rename(columns=lambda x: x.replace(" ", "").replace(".", "_").lower()) ) pkm.head() ``` ## Ejercicio #1 (1 pto) Agrupar por `generation` y `legendary` y obtener por grupo: * Promedio de `hp` * Mínimo y máximo de `sp_atk` y `sp_def` ``` ( pkm.groupby('generation') .agg( {'legendary' : 'mean'} ) ) ``` ## Ejercicio #2 (1 pto) El profesor Oakgueda determinó que una buen indicador de pokemones es: $$ 0.2 \, \textrm{hp} + 0.4 \,(\textrm{attack} + \textrm{sp_atk})^2 + 0.3 \,( \textrm{defense} + \textrm{sp_deff})^{1.5} + 0.1 \, \textrm{speed}$$ Según este indicador, ¿Qué grupo de pokemones (`type1`, `type2`) es en promedio mejor que el resto? ``` pkm['IND_OAK'] = (0.2*pkm['hp']+ 0.4 * (pkm['attack']+pkm['sp_atk'])**2+ 0.3 * (pkm['defense'] + pkm['sp_def'])**(1.5) + 0.1 * pkm['speed']) pkm.groupby(['type1', 'type2']).agg({'IND_OAK': 'mean'}).sort_values('IND_OAK').tail(1) ``` __Respuesta__: Ground Fire ## Ejercicio #3 (1 pto) Define una función que escale los datos tal que, si $s$ es una columna: $$s\_scaled = \frac{s - \min(s)}{\max(s) - \min(s)}$$ Y luego transforma cada columna agrupando por si el pokemon es legendario o no. ``` def minmax_scale(s): return (s-s.min())/(s.max()-s.min()) pkm.groupby('legendary').transform(minmax_scale) ``` ## Ejercicio #4 (1 pto) El profesor Oakgueda necesita saber cuántos pokemones hay luego de filtrar el dataset tal que el grupo de (`type1`, `type2`) tenga en promedio un indicador (el del ejercicio #2) mayor a 40000. ``` pkm.groupby(['type1', 'type2']).filter(lambda x : x['IND_OAK'].mean() > 40000 ) ``` __Respuesta:__ Hay solo dos pokemones que cumplen esta condición. * Primal Groudon Ground 44774.854123 * Hoopa Unbound Psychic 44369.690779
github_jupyter
import os import pandas as pd pkm = ( pd.read_csv(os.path.join("data", "pokemon.csv"), index_col="#") .rename(columns=lambda x: x.replace(" ", "").replace(".", "_").lower()) ) pkm.head() ( pkm.groupby('generation') .agg( {'legendary' : 'mean'} ) ) pkm['IND_OAK'] = (0.2*pkm['hp']+ 0.4 * (pkm['attack']+pkm['sp_atk'])**2+ 0.3 * (pkm['defense'] + pkm['sp_def'])**(1.5) + 0.1 * pkm['speed']) pkm.groupby(['type1', 'type2']).agg({'IND_OAK': 'mean'}).sort_values('IND_OAK').tail(1) def minmax_scale(s): return (s-s.min())/(s.max()-s.min()) pkm.groupby('legendary').transform(minmax_scale) pkm.groupby(['type1', 'type2']).filter(lambda x : x['IND_OAK'].mean() > 40000 )
0.231267
0.928797
``` import os import pickle from PIL import Image from PIL import ImageOps from urllib.request import urlretrieve import zipfile # Set target path tpath = os.path.join(os.getcwd(), 'omniglot/') # Download and extract omniglot origin_folder = "https://github.com/brendenlake/omniglot/raw/master/python/" fnames = ["images_evaluation.zip", "images_background.zip"] for fname in fnames: origin = os.path.join(origin_folder, fname) if not os.path.isdir('omniglot/'): os.makedirs('omniglot/') fpath = os.path.join(tpath, fname) urlretrieve(origin, fpath) zipfile.ZipFile(fpath).extractall(tpath) # Open all images and collect them in a nested list def load_chars(path): chars = [] char_locs = [] alphabet = [os.path.join(path, x) for x in sorted(os.listdir(path))] for alph in alphabet: character = [os.path.join(alph, x) for x in sorted(os.listdir(alph))] alph_chars = [] alph_char_locs = [] for char in character: char_insts = [os.path.join(char, x) for x in sorted(os.listdir(char))] char_instances = [] for char_inst in char_insts: tmp_im = Image.open(char_inst) tmp_im = tmp_im.convert('L') tmp_im = ImageOps.invert(tmp_im) tmp_im = tmp_im.convert('1') char_instances.append(tmp_im) alph_chars.append(char_instances) alph_char_locs.append(char_insts) chars.append(alph_chars) char_locs.append(alph_char_locs) return chars, char_locs # Run image opening and collection function chars_train, char_locs_train = load_chars(path = os.path.join(tpath, 'images_background/')) chars_eval, char_locs_eval = load_chars(path = os.path.join(tpath, 'images_evaluation/')) #Write dataset to pickle file if not os.path.exists(tpath): os.makedirs(tpath) #Write train split containing all alphabets from images_background with open(tpath + 'chars_train.pickle', 'wb') as fp: pickle.dump(chars_train, fp) with open(tpath + 'char_locs_train.pickle', 'wb') as fp: pickle.dump(char_locs_train, fp) #Write evaluation split containing the first 10 alphabets from images_evaluation with open(tpath + 'chars_eval.pickle', 'wb') as fp: pickle.dump(chars_eval[:10], fp) with open(tpath + 'char_locs_eval.pickle', 'wb') as fp: pickle.dump(char_locs_eval[:10], fp) #Write test split containing the remaining 10 alphabets from images_evaluation with open(tpath + 'chars_test.pickle', 'wb') as fp: pickle.dump(chars_eval[10:], fp) with open(tpath + 'char_locs_test.pickle', 'wb') as fp: pickle.dump(char_locs_eval[10:], fp) ```
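A supplementary sketch (assuming the pickle files written by the script above already exist under `./omniglot/`): loading one split back and stacking a single character's drawings into a NumPy array, which is the usual starting point for few-shot experiments on Omniglot.

```
# Supplementary sketch: read a split back and turn one character's drawings into an array.
import os
import pickle
import numpy as np

tpath = os.path.join(os.getcwd(), 'omniglot/')

with open(os.path.join(tpath, 'chars_train.pickle'), 'rb') as fp:
    chars_train = pickle.load(fp)   # chars_train[alphabet][character][instance] -> PIL image

print('alphabets in train split:', len(chars_train))
print('characters in first alphabet:', len(chars_train[0]))

# Stack all instances of the first character of the first alphabet.
# The script stored 1-bit images, so convert back to 8-bit grayscale (0/255) first.
instances = chars_train[0][0]
stack = np.stack([np.asarray(im.convert('L'), dtype=np.uint8) for im in instances])
print('instance tensor shape:', stack.shape)   # e.g. (20, 105, 105)
```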
github_jupyter
import os import pickle from PIL import Image from PIL import ImageOps from urllib.request import urlretrieve import zipfile # Set target path tpath = os.path.join(os.getcwd(), 'omniglot/') # Download and extract omniglot origin_folder = "https://github.com/brendenlake/omniglot/raw/master/python/" fnames = ["images_evaluation.zip", "images_background.zip"] for fname in fnames: origin = os.path.join(origin_folder, fname) if not os.path.isdir('omniglot/'): os.makedirs('omniglot/') fpath = os.path.join(tpath, fname) urlretrieve(origin, fpath) zipfile.ZipFile(fpath).extractall(tpath) # Open all images and collect them in a nested list def load_chars(path): chars = [] char_locs = [] alphabet = [os.path.join(path, x) for x in sorted(os.listdir(path))] for alph in alphabet: character = [os.path.join(alph, x) for x in sorted(os.listdir(alph))] alph_chars = [] alph_char_locs = [] for char in character: char_insts = [os.path.join(char, x) for x in sorted(os.listdir(char))] char_instances = [] for char_inst in char_insts: tmp_im = Image.open(char_inst) tmp_im = tmp_im.convert('L') tmp_im = ImageOps.invert(tmp_im) tmp_im = tmp_im.convert('1') char_instances.append(tmp_im) alph_chars.append(char_instances) alph_char_locs.append(char_insts) chars.append(alph_chars) char_locs.append(alph_char_locs) return chars, char_locs # Run image opening and collection function chars_train, char_locs_train = load_chars(path = os.path.join(tpath, 'images_background/')) chars_eval, char_locs_eval = load_chars(path = os.path.join(tpath, 'images_evaluation/')) #Write dataset to pickle file if not os.path.exists(tpath): os.makedirs(tpath) #Write train split containing all alphabets from images_background with open(tpath + 'chars_train.pickle', 'wb') as fp: pickle.dump(chars_train, fp) with open(tpath + 'char_locs_train.pickle', 'wb') as fp: pickle.dump(char_locs_train, fp) #Write evaluation split containing the first 10 alphabets from images_evaluation with open(tpath + 'chars_eval.pickle', 'wb') as fp: pickle.dump(chars_eval[:10], fp) with open(tpath + 'char_locs_eval.pickle', 'wb') as fp: pickle.dump(char_locs_eval[:10], fp) #Write test split containing the remaining 10 alphabets from images_evaluation with open(tpath + 'chars_test.pickle', 'wb') as fp: pickle.dump(chars_eval[10:], fp) with open(tpath + 'char_locs_test.pickle', 'wb') as fp: pickle.dump(char_locs_eval[10:], fp)
0.300232
0.189071
``` # GFootball environment. !pip install kaggle_environments !apt-get update -y !apt-get install -y libsdl2-gfx-dev libsdl2-ttf-dev !git clone -b v2.3 https://github.com/google-research/football.git !mkdir -p football/third_party/gfootball_engine/lib !wget https://storage.googleapis.com/gfootball/prebuilt_gameplayfootball_v2.3.so -O football/third_party/gfootball_engine/lib/prebuilt_gameplayfootball.so !cd football && GFOOTBALL_USE_PREBUILT_SO=1 pip3 install . # Some helper code !git clone https://github.com/garethjns/kaggle-football.git !pip install reinforcement_learning_keras==0.6.0 import collections from typing import Union, Callable, List, Tuple, Iterable, Any, Dict from dataclasses import dataclass from tqdm import tqdm import matplotlib.pyplot as plt import numpy as np from tensorflow import keras import tensorflow as tf import seaborn as sns import gym import gfootball import glob import imageio import pathlib import zlib import pickle import tempfile import os import sys from IPython.display import Image, display from gfootball.env import observation_preprocessing sns.set() # In TF > 2, training keras models in a loop with eager execution on causes memory leaks and terrible performance. tf.compat.v1.disable_eager_execution() sys.path.append("/kaggle/working/kaggle-football/") from __future__ import division from __future__ import print_function import itertools as it from random import sample, randint, random from time import time, sleep import numpy as np import skimage.color, skimage.transform import tensorflow as tf from tqdm import trange from argparse import ArgumentParser my_env = gym.make("GFootball-11_vs_11_kaggle-SMM-v0") import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.distributions as distributions import matplotlib.pyplot as plt import numpy as np import gym import tqdm class MLP(nn.Module): def __init__(self,stack_size , action_size , seed): super(MLP , self).__init__() """ define a simple model with some conv layers and later with fully connected layers """ self.in_size = stack_size self.out_size = action_size self.seed = torch.manual_seed(seed) self.conv_block1 = nn.Conv2d( 4 , 16 , kernel_size=3 ,stride=1 , padding=1) self.conv_block2 = nn.Conv2d( 16 , 32 , kernel_size=3 ,stride=2 , padding=1) self.conv_block3 = nn.Conv2d( 32 , 64 , kernel_size=3 ,stride=2 , padding=1) self.conv_block4 = nn.Conv2d( 64 , 256 , kernel_size=3 ,stride=2 , padding=1) self.conv_block5 = nn.Conv2d( 256 , 512 , kernel_size=3 ,stride=2 , padding=1) self.flatten_size = 512*5*6 self.fc1 = nn.Linear(self.flatten_size , 512) self.fc2 = nn.Linear(512 , self.out_size) def forward(self, x): x = self.conv_block1(x) x = F.relu( self.conv_block2(x) ) x = F.relu( self.conv_block3(x) ) x = F.relu( self.conv_block4(x) ) x = F.relu( self.conv_block5(x) ) x = x.view(-1,self.flatten_size) x = F.dropout(F.relu(self.fc1(x)) ,p=0.4 ) x = self.fc2(x) return x def init_weights(m): if type(m) == nn.Linear: torch.nn.init.kaiming_normal_(m.weight) m.bias.data.fill_(0) input_dim = my_env.observation_space.shape[-1] output_dim = my_env.action_space.n device = torch.device('cuda') def prepare_input(frame): frame = frame / 255.0 frame = np.reshape(frame , (4,72,96)) return frame def train(env, policy, optimizer, discount_factor, device): policy.train() log_prob_actions = [] rewards = [] done = False episode_reward = 0 state = env.reset() state = prepare_input(state) while not done: state = torch.FloatTensor(state).unsqueeze(0).to(device) action_pred = 
policy(state) action_prob = F.softmax(action_pred, dim = -1) dist = distributions.Categorical(action_prob) action = dist.sample() log_prob_action = dist.log_prob(action) state, reward, done, _ = env.step(action.item()) state = prepare_input(state) log_prob_actions.append(log_prob_action) rewards.append(reward) episode_reward += reward log_prob_actions = torch.cat(log_prob_actions) returns = calculate_returns(rewards, discount_factor, device) loss = update_policy(returns, log_prob_actions, optimizer) return loss, episode_reward def calculate_returns(rewards, discount_factor, device, normalize = True): returns = [] R = 0 for r in reversed(rewards): R = r + R * discount_factor returns.insert(0, R) returns = torch.tensor(returns).to(device) if normalize: returns = (returns - returns.mean()) / returns.std() return returns def update_policy(returns, log_prob_actions, optimizer): returns = returns.detach() loss = -(returns * log_prob_actions).sum() optimizer.zero_grad() loss.backward() optimizer.step() return loss.item() def evaluate(env, policy, device): policy.eval() done = False episode_reward = 0 state = env.reset() state = state / 255.0 while not done: state = torch.FloatTensor(state).unsqueeze(0).to(device) with torch.no_grad(): action_pred = policy(state) action_prob = F.softmax(action_pred, dim = -1) action = torch.argmax(action_prob, dim = -1) state, reward, done, _ = env.step(action.item()) episode_reward += reward return episode_reward n_runs = 1 max_episodes = 200 discount_factor = 0.999 train_rewards = torch.zeros(n_runs, max_episodes) test_rewards = torch.zeros(n_runs, max_episodes) device = torch.device('cpu') for run in range(n_runs): policy = MLP(input_dim, output_dim, 45862) policy = policy.to(device) policy.apply(init_weights) optimizer = optim.Adam(policy.parameters(), lr=1e-2) for episode in tqdm.tqdm(range(max_episodes), desc=f'Run: {run}'): loss, train_reward = train(my_env, policy, optimizer, discount_factor, device) train_rewards[run][episode] = train_reward print("Total Loss : {:.4f} Total Reward : {:.4f}".format(loss , train_reward)) torch.save(policy.state_dict(), "montecarlo_model.pth") %%writefile main.py import torch.nn as nn import torch.nn.functional as F import torch import torch.optim as optim import gym import numpy as np from gfootball.env import observation_preprocessing import random # Handling random number generation import time # Handling time calculation from skimage import transform# Help us to preprocess the frames # &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& Model &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&7 class MLP(nn.Module): def __init__(self,stack_size , action_size , seed): super(MLP , self).__init__() """ define a simple model with some conv layers and later with fully connected layers """ self.in_size = stack_size self.out_size = action_size self.seed = torch.manual_seed(seed) self.conv_block1 = nn.Conv2d( 4 , 16 , kernel_size=3 ,stride=1 , padding=1) self.conv_block2 = nn.Conv2d( 16 , 32 , kernel_size=3 ,stride=2 , padding=1) self.conv_block3 = nn.Conv2d( 32 , 64 , kernel_size=3 ,stride=2 , padding=1) self.conv_block4 = nn.Conv2d( 64 , 256 , kernel_size=3 ,stride=2 , padding=1) self.conv_block5 = nn.Conv2d( 256 , 512 , kernel_size=3 ,stride=2 , padding=1) self.flatten_size = 128*5*6 self.fc1 = nn.Linear(self.flatten_size , 512) self.fc2 = nn.Linear(512 , self.out_size) def forward(self, x): x = self.conv_block1(x) x = F.relu( self.conv_block2(x) ) x = F.relu( self.conv_block3(x) ) x = F.relu( self.conv_block5(x) ) x = F.relu( 
self.conv_block6(x) ) x = x.view(-1,self.flatten_size) x = F.dropout(F.relu(self.fc1(x)) ,p=0.4 ) x = self.fc2(x) return x montecarlo_model = MLP(4,19,67543) try: model.load_state_dict(torch.load("/kaggle_simulations/agent/montecarlo_model.pth" , map_location="cpu")) except (FileNotFoundError, ValueError): model.load_state_dict(torch.load("montecarlo_model.pth" , map_location="cpu")) model.eval() global state global stacked_frames data_preprocessor = DataPreprocess(stack_size=3) #@human_readable_agent def agent(obs): # Get the raw observations return by the environment obs = obs['players_raw'][0] # Convert these to the same output as the SMMWrapper we used in training obs = observation_preprocessing.generate_smm([obs]).squeeze() state = data_preprocessor.stack_frames(obs ,False) state_tensor = torch.from_numpy(state).float().unsqueeze(0).to(device) #inference the model action_tensor = model.forward(state_tensor) # Use the SMMFrameProcessWrapper to do the buffering, but not enviroment # stepping or anything related to the Gym API. action = np.argmax(action_tensor.to('cpu').detach().numpy()) return [int(action)] from typing import Tuple, Dict, List, Any from kaggle_environments import make env = make("football", debug=True,configuration={"save_video": True, "scenario_name": "11_vs_11_kaggle"}) # Define players left_player = "/kaggle/working/main.py" # A custom agent, eg. random_agent.py or example_agent.py right_player = "run_right" # eg. A built in 'AI' agent or the agent again output: List[Tuple[Dict[str, Any], Dict[str, Any]]] = env.run([left_player, right_player]) #print(f"Final score: {sum([r['reward'] for r in output[0]])} : {sum([r['reward'] for r in output[1]])}") env.render(mode="human", width=800, height=600) !tar -czvf submission.tar.gz ./main.py* ./montecarlo_model.pth* ```
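A supplementary sketch (independent of the football environment and of the models above): the REINFORCE update used in `train`, isolated on made-up dummy data so the discounted-return and loss computation can be sanity-checked without running an episode. The small epsilon in the normalization is an addition, not part of the original code.

```
# Supplementary sketch: REINFORCE bookkeeping in isolation.
# returns_t = r_t + gamma * returns_{t+1};  loss = -sum_t(return_t * log pi(a_t|s_t)).
import torch

def discounted_returns(rewards, gamma, normalize=True):
    returns, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running
        returns.insert(0, running)
    returns = torch.tensor(returns, dtype=torch.float32)
    if normalize:
        # epsilon avoids dividing by zero on very short episodes (an addition here)
        returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    return returns

# Dummy episode: 5 steps, a reward only at the end.
rewards = [0.0, 0.0, 0.0, 0.0, 1.0]
log_probs = torch.log(torch.tensor([0.2, 0.3, 0.25, 0.4, 0.5], requires_grad=True))
returns = discounted_returns(rewards, gamma=0.999)

loss = -(returns.detach() * log_probs).sum()
loss.backward()
print('returns:', returns)
print('loss:', loss.item())
```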
github_jupyter
# GFootball environment. !pip install kaggle_environments !apt-get update -y !apt-get install -y libsdl2-gfx-dev libsdl2-ttf-dev !git clone -b v2.3 https://github.com/google-research/football.git !mkdir -p football/third_party/gfootball_engine/lib !wget https://storage.googleapis.com/gfootball/prebuilt_gameplayfootball_v2.3.so -O football/third_party/gfootball_engine/lib/prebuilt_gameplayfootball.so !cd football && GFOOTBALL_USE_PREBUILT_SO=1 pip3 install . # Some helper code !git clone https://github.com/garethjns/kaggle-football.git !pip install reinforcement_learning_keras==0.6.0 import collections from typing import Union, Callable, List, Tuple, Iterable, Any, Dict from dataclasses import dataclass from tqdm import tqdm import matplotlib.pyplot as plt import numpy as np from tensorflow import keras import tensorflow as tf import seaborn as sns import gym import gfootball import glob import imageio import pathlib import zlib import pickle import tempfile import os import sys from IPython.display import Image, display from gfootball.env import observation_preprocessing sns.set() # In TF > 2, training keras models in a loop with eager execution on causes memory leaks and terrible performance. tf.compat.v1.disable_eager_execution() sys.path.append("/kaggle/working/kaggle-football/") from __future__ import division from __future__ import print_function import itertools as it from random import sample, randint, random from time import time, sleep import numpy as np import skimage.color, skimage.transform import tensorflow as tf from tqdm import trange from argparse import ArgumentParser my_env = gym.make("GFootball-11_vs_11_kaggle-SMM-v0") import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.distributions as distributions import matplotlib.pyplot as plt import numpy as np import gym import tqdm class MLP(nn.Module): def __init__(self,stack_size , action_size , seed): super(MLP , self).__init__() """ define a simple model with some conv layers and later with fully connected layers """ self.in_size = stack_size self.out_size = action_size self.seed = torch.manual_seed(seed) self.conv_block1 = nn.Conv2d( 4 , 16 , kernel_size=3 ,stride=1 , padding=1) self.conv_block2 = nn.Conv2d( 16 , 32 , kernel_size=3 ,stride=2 , padding=1) self.conv_block3 = nn.Conv2d( 32 , 64 , kernel_size=3 ,stride=2 , padding=1) self.conv_block4 = nn.Conv2d( 64 , 256 , kernel_size=3 ,stride=2 , padding=1) self.conv_block5 = nn.Conv2d( 256 , 512 , kernel_size=3 ,stride=2 , padding=1) self.flatten_size = 512*5*6 self.fc1 = nn.Linear(self.flatten_size , 512) self.fc2 = nn.Linear(512 , self.out_size) def forward(self, x): x = self.conv_block1(x) x = F.relu( self.conv_block2(x) ) x = F.relu( self.conv_block3(x) ) x = F.relu( self.conv_block4(x) ) x = F.relu( self.conv_block5(x) ) x = x.view(-1,self.flatten_size) x = F.dropout(F.relu(self.fc1(x)) ,p=0.4 ) x = self.fc2(x) return x def init_weights(m): if type(m) == nn.Linear: torch.nn.init.kaiming_normal_(m.weight) m.bias.data.fill_(0) input_dim = my_env.observation_space.shape[-1] output_dim = my_env.action_space.n device = torch.device('cuda') def prepare_input(frame): frame = frame / 255.0 frame = np.reshape(frame , (4,72,96)) return frame def train(env, policy, optimizer, discount_factor, device): policy.train() log_prob_actions = [] rewards = [] done = False episode_reward = 0 state = env.reset() state = prepare_input(state) while not done: state = torch.FloatTensor(state).unsqueeze(0).to(device) action_pred = 
policy(state) action_prob = F.softmax(action_pred, dim = -1) dist = distributions.Categorical(action_prob) action = dist.sample() log_prob_action = dist.log_prob(action) state, reward, done, _ = env.step(action.item()) state = prepare_input(state) log_prob_actions.append(log_prob_action) rewards.append(reward) episode_reward += reward log_prob_actions = torch.cat(log_prob_actions) returns = calculate_returns(rewards, discount_factor, device) loss = update_policy(returns, log_prob_actions, optimizer) return loss, episode_reward def calculate_returns(rewards, discount_factor, device, normalize = True): returns = [] R = 0 for r in reversed(rewards): R = r + R * discount_factor returns.insert(0, R) returns = torch.tensor(returns).to(device) if normalize: returns = (returns - returns.mean()) / returns.std() return returns def update_policy(returns, log_prob_actions, optimizer): returns = returns.detach() loss = -(returns * log_prob_actions).sum() optimizer.zero_grad() loss.backward() optimizer.step() return loss.item() def evaluate(env, policy, device): policy.eval() done = False episode_reward = 0 state = env.reset() state = state / 255.0 while not done: state = torch.FloatTensor(state).unsqueeze(0).to(device) with torch.no_grad(): action_pred = policy(state) action_prob = F.softmax(action_pred, dim = -1) action = torch.argmax(action_prob, dim = -1) state, reward, done, _ = env.step(action.item()) episode_reward += reward return episode_reward n_runs = 1 max_episodes = 200 discount_factor = 0.999 train_rewards = torch.zeros(n_runs, max_episodes) test_rewards = torch.zeros(n_runs, max_episodes) device = torch.device('cpu') for run in range(n_runs): policy = MLP(input_dim, output_dim, 45862) policy = policy.to(device) policy.apply(init_weights) optimizer = optim.Adam(policy.parameters(), lr=1e-2) for episode in tqdm.tqdm(range(max_episodes), desc=f'Run: {run}'): loss, train_reward = train(my_env, policy, optimizer, discount_factor, device) train_rewards[run][episode] = train_reward print("Total Loss : {:.4f} Total Reward : {:.4f}".format(loss , train_reward)) torch.save(policy.state_dict(), "montecarlo_model.pth") %%writefile main.py import torch.nn as nn import torch.nn.functional as F import torch import torch.optim as optim import gym import numpy as np from gfootball.env import observation_preprocessing import random # Handling random number generation import time # Handling time calculation from skimage import transform# Help us to preprocess the frames # &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& Model &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&7 class MLP(nn.Module): def __init__(self,stack_size , action_size , seed): super(MLP , self).__init__() """ define a simple model with some conv layers and later with fully connected layers """ self.in_size = stack_size self.out_size = action_size self.seed = torch.manual_seed(seed) self.conv_block1 = nn.Conv2d( 4 , 16 , kernel_size=3 ,stride=1 , padding=1) self.conv_block2 = nn.Conv2d( 16 , 32 , kernel_size=3 ,stride=2 , padding=1) self.conv_block3 = nn.Conv2d( 32 , 64 , kernel_size=3 ,stride=2 , padding=1) self.conv_block4 = nn.Conv2d( 64 , 256 , kernel_size=3 ,stride=2 , padding=1) self.conv_block5 = nn.Conv2d( 256 , 512 , kernel_size=3 ,stride=2 , padding=1) self.flatten_size = 128*5*6 self.fc1 = nn.Linear(self.flatten_size , 512) self.fc2 = nn.Linear(512 , self.out_size) def forward(self, x): x = self.conv_block1(x) x = F.relu( self.conv_block2(x) ) x = F.relu( self.conv_block3(x) ) x = F.relu( self.conv_block5(x) ) x = F.relu( 
self.conv_block6(x) ) x = x.view(-1,self.flatten_size) x = F.dropout(F.relu(self.fc1(x)) ,p=0.4 ) x = self.fc2(x) return x montecarlo_model = MLP(4,19,67543) try: model.load_state_dict(torch.load("/kaggle_simulations/agent/montecarlo_model.pth" , map_location="cpu")) except (FileNotFoundError, ValueError): model.load_state_dict(torch.load("montecarlo_model.pth" , map_location="cpu")) model.eval() global state global stacked_frames data_preprocessor = DataPreprocess(stack_size=3) #@human_readable_agent def agent(obs): # Get the raw observations return by the environment obs = obs['players_raw'][0] # Convert these to the same output as the SMMWrapper we used in training obs = observation_preprocessing.generate_smm([obs]).squeeze() state = data_preprocessor.stack_frames(obs ,False) state_tensor = torch.from_numpy(state).float().unsqueeze(0).to(device) #inference the model action_tensor = model.forward(state_tensor) # Use the SMMFrameProcessWrapper to do the buffering, but not enviroment # stepping or anything related to the Gym API. action = np.argmax(action_tensor.to('cpu').detach().numpy()) return [int(action)] from typing import Tuple, Dict, List, Any from kaggle_environments import make env = make("football", debug=True,configuration={"save_video": True, "scenario_name": "11_vs_11_kaggle"}) # Define players left_player = "/kaggle/working/main.py" # A custom agent, eg. random_agent.py or example_agent.py right_player = "run_right" # eg. A built in 'AI' agent or the agent again output: List[Tuple[Dict[str, Any], Dict[str, Any]]] = env.run([left_player, right_player]) #print(f"Final score: {sum([r['reward'] for r in output[0]])} : {sum([r['reward'] for r in output[1]])}") env.render(mode="human", width=800, height=600) !tar -czvf submission.tar.gz ./main.py* ./montecarlo_model.pth*
0.778986
0.325655
``` %%html <link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" /> <link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" /> <style>.subtitle {font-size:medium; display:block}</style> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" /> <link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. --> <script> var cell = $(".container .cell").eq(0), ia = cell.find(".input_area") if (cell.find(".toggle-button").length == 0) { ia.after( $('<button class="toggle-button">Toggle hidden code</button>').click( function (){ ia.toggle() } ) ) ia.hide() } </script> ``` **Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard. $\newcommand{\identity}{\mathrm{id}} \newcommand{\notdivide}{\nmid} \newcommand{\notsubset}{\not\subset} \newcommand{\lcm}{\operatorname{lcm}} \newcommand{\gf}{\operatorname{GF}} \newcommand{\inn}{\operatorname{Inn}} \newcommand{\aut}{\operatorname{Aut}} \newcommand{\Hom}{\operatorname{Hom}} \newcommand{\cis}{\operatorname{cis}} \newcommand{\chr}{\operatorname{char}} \newcommand{\Null}{\operatorname{Null}} \newcommand{\lt}{<} \newcommand{\gt}{>} \newcommand{\amp}{&} $ <div class="mathbook-content"><h2 class="heading hide-type" alt="Section 6.2 Lagrange's Theorem"><span class="type">Section</span><span class="codenumber">6.2</span><span class="title">Lagrange's Theorem</span></h2><a href="section-lagranges-theorem.ipynb" class="permalink">¶</a></div> <div class="mathbook-content"><article class="theorem-like" id="cosets_theorem_4"><h6 class="heading"><span class="type">Proposition</span><span class="codenumber">6.9</span></h6><p id="p-988">Let $H$ be a subgroup of $G$ with $g \in G$ and define a map $\phi:H \rightarrow gH$ by $\phi(h) = gh\text{.}$ The map $\phi$ is bijective; hence, the number of elements in $H$ is the same as the number of elements in $gH\text{.}$</p></article><article class="proof" id="proof-38"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-989">We first show that the map $\phi$ is one-to-one. Suppose that $\phi(h_1) = \phi(h_2)$ for elements $h_1, h_2 \in H\text{.}$ We must show that $h_1 = h_2\text{,}$ but $\phi(h_1) = gh_1$ and $\phi(h_2) = gh_2\text{.}$ So $gh_1 = gh_2\text{,}$ and by left cancellation $h_1= h_2\text{.}$ To show that $\phi$ is onto is easy. 
By definition every element of $gH$ is of the form $gh$ for some $h \in H$ and $\phi(h) = gh\text{.}$</p></article></div> <div class="mathbook-content"><article class="theorem-like" id="theorem-lagrange"><h6 class="heading"><span class="type">Theorem</span><span class="codenumber">6.10</span><span class="title">Lagrange</span></h6><p id="p-990">Let $G$ be a finite group and let $H$ be a subgroup of $G\text{.}$ Then $|G|/|H| = [G : H]$ is the number of distinct left cosets of $H$ in $G\text{.}$ In particular, the number of elements in $H$ must divide the number of elements in $G\text{.}$</p></article><article class="proof" id="proof-39"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-991">The group $G$ is partitioned into $[G : H]$ distinct left cosets. Each left coset has $|H|$ elements; therefore, $|G| = [G : H] |H|\text{.}$</p></article></div> <div class="mathbook-content"><article class="theorem-like" id="corollary-cosets-theorem-6"><h6 class="heading"><span class="type">Corollary</span><span class="codenumber">6.11</span></h6><p id="p-992">Suppose that $G$ is a finite group and $g \in G\text{.}$ Then the order of $g$ must divide the number of elements in $G\text{.}$</p></article></div> <div class="mathbook-content"><article class="theorem-like" id="corollary-cosets-theorem-7"><h6 class="heading"><span class="type">Corollary</span><span class="codenumber">6.12</span></h6><p id="p-993">Let $|G| = p$ with $p$ a prime number. Then $G$ is cyclic and any $g \in G$ such that $g \neq e$ is a generator.</p></article><article class="proof" id="proof-40"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-994">Let $g$ be in $G$ such that $g \neq e\text{.}$ Then by Corollary <a href="section-lagranges-theorem.ipynb#corollary-cosets-theorem-6" class="xref" alt="Corollary 6.11 " title="Corollary 6.11 ">6.11</a>, the order of $g$ must divide the order of the group. Since $|\langle g \rangle| \gt 1\text{,}$ it must be $p\text{.}$ Hence, $g$ generates $G\text{.}$</p></article></div> <div class="mathbook-content"><p id="p-995">Corollary <a href="section-lagranges-theorem.ipynb#corollary-cosets-theorem-7" class="xref" alt="Corollary 6.12 " title="Corollary 6.12 ">6.12</a> suggests that groups of prime order $p$ must somehow look like ${\mathbb Z}_p\text{.}$</p></div> <div class="mathbook-content"><article class="theorem-like" id="corollary-cosets-theorem-8"><h6 class="heading"><span class="type">Corollary</span><span class="codenumber">6.13</span></h6><p id="p-996">Let $H$ and $K$ be subgroups of a finite group $G$ such that $G \supset H \supset K\text{.}$ Then</p><div class="displaymath"> \begin{equation*} [G:K] = [G:H][H:K]. \end{equation*} </div></article><article class="proof" id="proof-41"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-997">Observe that</p><div class="displaymath"> \begin{equation*} [G:K] = \frac{|G|}{|K|} = \frac{|G|}{|H|} \cdot \frac{|H|}{|K|} = [G:H][H:K]. \end{equation*} </div></article></div> <div class="mathbook-content"><article class="remark-like" id="remark-5"><h6 class="heading"><span class="type">Remark</span><span class="codenumber">6.14</span><span class="title">The converse of Lagrange's Theorem is false</span></h6><p id="p-998">The group $A_4$ has order 12; however, it can be shown that it does not possess a subgroup of order 6. According to Lagrange's Theorem, subgroups of a group of order 12 can have orders of either 1, 2, 3, 4, or 6. However, we are not guaranteed that subgroups of every possible order exist. 
To prove that $A_4$ has no subgroup of order 6, we will assume that it does have such a subgroup $H$ and show that a contradiction must occur. Since $A_4$ contains eight 3-cycles, we know that $H$ must contain a 3-cycle. We will show that if $H$ contains one 3-cycle, then it must contain more than 6 elements.</p></article></div> <div class="mathbook-content"><article class="theorem-like" id="proposition-cosets-theorem-10"><h6 class="heading"><span class="type">Proposition</span><span class="codenumber">6.15</span></h6><p id="p-999">The group $A_4$ has no subgroup of order 6.</p></article><article class="proof" id="proof-42"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-1000">Since $[A_4 : H] = 2\text{,}$ there are only two cosets of $H$ in $A_4\text{.}$ Inasmuch as one of the cosets is $H$ itself, right and left cosets must coincide; therefore, $gH = Hg$ or $g H g^{-1} = H$ for every $g \in A_4\text{.}$ Since there are eight 3-cycles in $A_4\text{,}$ at least one 3-cycle must be in $H\text{.}$ Without loss of generality, assume that $(123)$ is in $H\text{.}$ Then $(123)^{-1} = (132)$ must also be in $H\text{.}$ Since $g h g^{-1} \in H$ for all $g \in A_4$ and all $h \in H$ and</p><div class="displaymath"> \begin{align*} (124)(123)(124)^{-1} & = (124)(123)(142) = (243)\\ (243)(123)(243)^{-1} & = (243)(123)(234) = (142) \end{align*} </div><p>we can conclude that $H$ must have at least seven elements</p><div class="displaymath"> \begin{equation*} (1), (123), (132), (243), (243)^{-1} = (234), (142), (142)^{-1} = (124). \end{equation*} </div><p>Therefore, $A_4$ has no subgroup of order 6.</p></article></div> <div class="mathbook-content"><p id="p-1001">In fact, we can say more about when two cycles have the same length.</p></div> <div class="mathbook-content"><article class="theorem-like" id="theorem-cycle-length-theorem"><h6 class="heading"><span class="type">Theorem</span><span class="codenumber">6.16</span></h6><p id="p-1002">Two cycles $\tau$ and $\mu$ in $S_n$ have the same length if and only if there exists a $\sigma \in S_n$ such that $\mu = \sigma \tau \sigma^{-1}\text{.}$</p></article><article class="proof" id="proof-43"><h6 class="heading"><span class="type">Proof</span></h6><p id="p-1003">Suppose that</p><div class="displaymath"> \begin{align*} \tau & = (a_1, a_2, \ldots, a_k )\\ \mu & = (b_1, b_2, \ldots, b_k ). \end{align*} </div><p>Define $\sigma$ to be the permutation</p><div class="displaymath"> \begin{align*} \sigma( a_1 ) & = b_1\\ \sigma( a_2 ) & = b_2\\ & \vdots \\ \sigma( a_k ) & = b_k. \end{align*} </div><p>Then $\mu = \sigma \tau \sigma^{-1}\text{.}$</p><p id="p-1004">Conversely, suppose that $\tau = (a_1, a_2, \ldots, a_k )$ is a $k$-cycle and $\sigma \in S_n\text{.}$ If $\sigma( a_i ) = b$ and $\sigma( a_{(i \bmod k) + 1}) = b'\text{,}$ then $\mu( b) = b'\text{.}$ Hence,</p><div class="displaymath"> \begin{equation*} \mu = ( \sigma(a_1), \sigma(a_2), \ldots, \sigma(a_k) ). \end{equation*} </div><p>Since $\sigma$ is one-to-one and onto, $\mu$ is a cycle of the same length as $\tau\text{.}$</p></article></div>
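A supplementary computational check (not part of the text): $A_4$ is small enough to enumerate directly, so a brute-force search over closed subsets confirms both Lagrange's Theorem (every subgroup order divides 12) and Proposition 6.15 (no subgroup of order 6).

```
# Supplementary sketch: brute-force the subgroups of A4.
from itertools import combinations, permutations

def parity(p):
    inv = sum(1 for i in range(len(p)) for j in range(i + 1, len(p)) if p[i] > p[j])
    return inv % 2

def compose(p, q):                       # (p o q)(i) = p[q[i]]
    return tuple(p[q[i]] for i in range(len(q)))

A4 = [p for p in permutations(range(4)) if parity(p) == 0]   # the even permutations
assert len(A4) == 12

def is_subgroup(subset):
    s = set(subset)
    # a nonempty finite subset closed under the operation is a subgroup
    return all(compose(a, b) in s for a in s for b in s)

orders = sorted({len(c) for n in range(1, 13) for c in combinations(A4, n)
                 if is_subgroup(c)})
print('orders of subgroups of A4:', orders)   # 6 is missing, although 6 divides 12
```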
github_jupyter
%%html <link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" /> <link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" /> <style>.subtitle {font-size:medium; display:block}</style> <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" /> <link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. --> <script> var cell = $(".container .cell").eq(0), ia = cell.find(".input_area") if (cell.find(".toggle-button").length == 0) { ia.after( $('<button class="toggle-button">Toggle hidden code</button>').click( function (){ ia.toggle() } ) ) ia.hide() } </script>
0.429908
0.854521
# Introduction to Python > These documents provide a relatively brief overview of the main features of Python. They are intended as a crash course for students that already have some idea of how to program in another language. Not every aspect of Python is covered and for the most part the contents simply provides lists of what can be done in this language with some brief examples. ## Introduction Python is a modern, robust, high level programming language. It is very easy to pick up even if you are completely new to programming. Python, similar to other languages like matlab or R, is interpreted hence runs slowly compared to C++, Fortran or Java. However writing programs in Python is very quick. Python has a very large collection of libraries for everything from scientific computing to web services. It caters for object oriented and functional programming with module system that allows large and complex applications to be developed in Python. These lectures are using jupyter notebooks which mix Python code with documentation. The python notebooks can be run on a webserver or stand-alone on a computer. To give an indication of what Python code looks like, here is a simple bit of code that defines a set $N=\{1,3,4,5,7\}$ and calculates the sum of the squared elements of this set: $\sum_{i\in N} i^2=100$ ``` N={1,3,4,5,7,8} print('The sum of ∑_i∈N i*i =',sum( i**2 for i in N ) ) ``` ## Contents This course is broken up into a number of notebooks (chapters). * [00](00.ipynb) This introduction with additional information below on how to get started in running python * [01](01.ipynb) Basic data types and operations (numbers, strings) * [02](02.ipynb) String manipulation * [03](03.ipynb) Data structures: Lists and Tuples * [04](04.ipynb) Data structures (continued): dictionaries * [05](05.ipynb) Control statements: if, for, while, try statements * [06](06.ipynb) Functions * [07](07.ipynb) Classes and basic object oriented programming * [08](08.ipynb) Scipy: libraries for arrays (matrices) and plotting * [09](09.ipynb) Mixed Integer Linear Programming using the mymip library. * [10](10.ipynb) Networks and graphs under python - a very brief introduction * [11](11.ipynb) Using the numba library for fast numerical computing. This is a tutorial style introduction to Python. For a quick reminder / summary of Python syntax the following [Quick Reference Card](http://www.cs.put.poznan.pl/csobaniec/software/python/py-qrc.html) may be useful. A longer and more detailed tutorial style introduction to python is available from the python site at: https://docs.python.org/3/tutorial/ ## Installation ### Loging into the web server The easiest way to run this and other notebooks for staff and students at Monash University is to log into the Jupyter server at [https://maxima.erc.monash.edu]. The steps for running notebooks are: * Log in using your monash user name, something like `smith005` - do not use your email address. If you do not have access to this, please contact Andreas Ernst * Press the start button (if prompted by the system) * Use the menu of the jupyter system to upload a .ipynb python notebook file or to start a new notebook. ### Installing Python runs on windows, linux, mac and other environments. There are many python distributions available. However the recommended way to install python under Microsoft Windows or Linux is to use the Anaconda distribution available at [https://www.anaconda.com/distribution/]. 
If you are installing python from elsewhere, make sure you get at least Python version *3.6*, not 2.7. The Anaconda distribution comes with the [SciPy](https://www.scipy.org/) collection of scientific python tools as well as the Jupyter notebook. For developing python code without notebooks consider using [spyder](https://github.com/spyder-ide/spyder) (also included with Anaconda) or your favourite IDE under windows, mac etc (e.g. [Visual Studio Code](https://code.visualstudio.com/docs/languages/python) which handles both plain python programs and notebooks). To open a notebook with anaconda installed, from the terminal run: ipython notebook Note that for the Monash University optimisation course additional modules relating to the commercial optimisation library [CPLEX](https://www.ibm.com/au-en/analytics/cplex-optimizer) and possibly [Gurobi](http://www.gurobi.com/) will be used. These libraries are not available as part of any standard distribution but are available under academic licence. Both CPLEX & Gurobi are included on the [Monash server](https://maxima.erc.monash.edu). ## How to learn from this resource? Download all the notebooks from Moodle or https://gitlab.erc.monash.edu.au/andrease/Python4Maths.git Upload them to the Monash server and launch them there, or launch ipython notebook from the folder which contains the notebooks. Open each one of them and select Cell > All Output > Clear. This will clear all the outputs, and now you can work through each statement and learn interactively. ## Using Notebooks Notebooks contain a mixture of documentation and code cells. Use the menus or buttons at the top of the notebook to run each cell. To get you started: * When creating new notebooks make sure to select the Python3 kernel (the server also allows you to create Matlab, Julia or R notebooks), and _rename the notebook_ to something meaningful using the `File->Rename...` menu option. * Documentation is written in markdown, a form of plain text with some basic formatting options (e.g. using `__text__` to make text bold, or `$latex$` to include mathematical formulas in latex format). For more information see the [Jupyter Markdown](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Working%20With%20Markdown%20Cells.html) documentation or look at the _Help->Markdown_ entry in the menu above. * Code cells contain Python code. You can execute just one cell or the whole notebook. <br> **WARNING:** it is possible to get yourself very confused by out-of-order execution of cells. If you are getting very unexpected results after some time of incremental editing & execution of cells, try using `Kernel -> Restart & Run All` to restart the python interpreter and run all cells. This will ensure the interpreter is running exactly what you see in front of you in the right order, rather than remembering things from an earlier attempt. * You can use the `+` button or `Insert` menu to add new cells anywhere. You can easily add additional cells to try things out as you read through this tutorial. A cell is simply the smallest collection of code that you can execute individually within the notebook. * When you are finished use the `File -> Close & Halt` menu (or the `Shutdown` button in the `Running` tab of the Jupyter file browser) to stop the notebook running. By default, notebooks will continue running even when you log off and close your web browser. This can be useful if you want to continue where you left off, but occasionally you need to do a cleanup to close some of these. 
## License This work is licensed under the Creative Commons Attribution 3.0 Unported License. To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/ <small><font style="font-size:6pt"><i> This Introduction to Python is available from https://gitlab.erc.monash.edu.au/andrease/Python4Maths.git. <br> The original version was written by Rajath Kumar and is available at https://github.com/rajathkumarmp/Python-Lectures. The notes have been significantly rewritten and updated for Python 3 and amended for use in Monash University mathematics courses by [Andreas Ernst](https://research.monash.edu/en/persons/andreas-ernst) </i></font></small>
### nicholas chung ### data607 ### 10/24/19 ## assignment: web APIs The New York Times web site provides a rich set of APIs, as described here: https://developer.nytimes.com/apis You’ll need to start by signing up for an API key. Your task is to choose one of the New York Times APIs, construct an interface in Python to read in the JSON data, and transform it into a pandas DataFrame. ### contents * data selection * method a (prep) * method b (transform) * analysis * findings ### data selection https://developer.nytimes.com/docs/books-product/1/overview "The lists/names service returns a list of all the NYT Best Sellers Lists. Some lists are published weekly and others monthly. The response includes when each list was first published and last published." assumptions: * for purpose of initial investigation, assume that each week has a new list ### method a summary of approach: 1. define historical scope of analysis 2. retrieve data from API 3. transform raw data into dataframe 4. save dataframe to CSV ``` import config import datetime import time import requests %matplotlib notebook import matplotlib.pyplot as plt import numpy as np import pandas as pd # define how far back we want to go in our analysis weeks = [] # append dates for each of week of the past "years" years to list years = 1 days = years * 365 for i in range(0, days, 7): weeks.append((datetime.date.today() - datetime.timedelta(i)).isoformat()) print(len(weeks), weeks[0:3]) # store API key as "key" for easy reference key = config.secret # confirm authentication and successful HTTPS request r = requests.get('https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json?api-key=' + key) r.status_code # store URL as fragments for ease of reuse url_a = "https://api.nytimes.com/svc/books/v3/" url_b = "lists/" url_d = "/hardcover-fiction.json?" books = [] book = {} # grab values for each object returned, then store as list of dicts for c in weeks: r = requests.get(url_a + url_b + c + url_d + "api-key=" + key) time.sleep(6) # rate limit guidance as per https://developer.nytimes.com/faq#a11 for i in r.json()['results']['books']: # create dictionary for each result book = { "date": c, "title": i['title'], "rank": i['rank'], "rank_last_week": i['rank_last_week'], "weeks_on_list": i['weeks_on_list'], "description": i['description'], "publisher": i['publisher'], "author": i['author'] } # append dictionary to list books.append(book) # store list of dictionanies to dataframe books_df = pd.DataFrame(books) books_df # save point books_df.to_csv("books.csv") # save point books_df = pd.read_csv("books.csv", index_col = 0) books_df ``` ### method b summary of approach: 1. iterator-based data selection 2. 
create dataframes from lists of dictionaries ``` # get titles of books that have hit rank 1 at least once ranked_first = [] for i in books_df.title.unique(): if len(books_df.loc[(books_df["rank"] == 1) & (books_df["title"] == i)]) > 0: ranked_first.append(i) # get date that marks longest duration on list for each top-ranked title first = {} firsts = [] for a, b, c, d, e in zip(ranked_first, books_df["weeks_on_list"], books_df["date"], books_df["author"], books_df["publisher"]): first = { "title": a, "weeks_on_list": b, "date": c, "author": d, "publisher": e } firsts.append(first) firsts_df = pd.DataFrame(firsts) firsts_df # get titles of books that have hit rank 2 at most & at least once ranked_second = [] for i in books_df.title.unique(): if len(books_df.loc[(books_df["rank"] == 2) & (books_df["title"] == i)]) > 0: ranked_second.append(i) # list comprehension to get only books that have hit rank 2, at most ranked_second = [x for x in ranked_second if x not in ranked_first] # get date that marks longest duration on list for each second-ranked title second = {} seconds = [] for a, b, c, d, e in zip(ranked_second, books_df["weeks_on_list"], books_df["date"], books_df["author"], books_df["publisher"]): second = { "title": a, "weeks_on_list": b, "date": c, "author": d, "publisher": e } seconds.append(second) seconds_df = pd.DataFrame(seconds) seconds_df # get titles of books that have hit rank 3 ranked_third = [] for i in books_df.title.unique(): if len(books_df.loc[(books_df["rank"] == 3) & (books_df["title"] == i)]) > 0: ranked_third.append(i) # list comprehension to get only books that have hit rank 2, at most ranked_third = [x for x in ranked_third if x not in ranked_first] ranked_third = [x for x in ranked_third if x not in ranked_second] # get date that marks longest duration on list for each second-ranked title third = {} thirds = [] for a, b, c, d, e in zip(ranked_third, books_df["weeks_on_list"], books_df["date"], books_df["author"], books_df["publisher"]): third = { "title": a, "weeks_on_list": b, "date": c, "author": d, "publisher": e } thirds.append(third) thirds_df = pd.DataFrame(thirds) thirds_df ``` ### analysis summary of exploratory approach: * show distribution of titles relative to publishers and authors observations: * a few authors over-index on the best-seller's list * a few publishers over-index on the best-seller's list * titles can be on the best-seller's list for over a year future analysis should look at: * popularity of a title, author, publisher over time * author frequency by unique titles (which authors have most books on list?) ``` titles = [] title = {} for a, b, c in zip(books_df.title.unique(), books_df["author"], books_df["publisher"]): #print(a, b) title = { "title": a, "author": b, "publisher": c } titles.append(title) titles_df = pd.DataFrame(titles) titles_df.describe() ``` ### visualization summary of approach: 1. create dataframes for subsets of data, then merge for comparison 2. 
plot publisher counts of titles by rank ``` # bar chart of count of publishers' first-ranked titles pub_firsts = firsts_df["publisher"].value_counts() pub_firsts = pd.DataFrame({"publisher":pub_firsts.index, "first":pub_firsts.values}) pub_seconds = seconds_df["publisher"].value_counts() pub_seconds = pd.DataFrame({"publisher":pub_seconds.index, "second":pub_seconds.values}) pub_thirds = thirds_df["publisher"].value_counts() pub_thirds = pd.DataFrame({"publisher":pub_thirds.index, "third":pub_thirds.values}) # merge dataframes for visualization reference pub_ranks = pub_firsts.merge(pub_seconds,on="publisher").merge(pub_thirds,on="publisher") # set index for bar chart pub_ranks = pub_ranks.set_index("publisher") # horizontal bar chart showing publisher's count of best-selling titles by rank ax = pub_ranks.plot.barh() ax.set_xlabel("number of titles") ax.set_ylabel("publishers") ```
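As an aside to the transformation step above, pandas can also flatten the JSON payload directly instead of building each dictionary by hand. The sketch below is an alternative, not the method used in this notebook; it assumes the `url_a`/`url_b`/`url_d` fragments and the `key` from `config` defined earlier, a pandas version (1.0+) that provides `pd.json_normalize`, and the `"2019-10-20"` date in the comment is just a hypothetical example value.

```
# Alternative sketch: flatten one week's "books" payload straight into a DataFrame
import pandas as pd
import requests

def fetch_week(date_iso):
    r = requests.get(url_a + url_b + date_iso + url_d + "api-key=" + key)
    r.raise_for_status()                                    # fail loudly on a bad response
    df = pd.json_normalize(r.json()['results']['books'])    # one column per JSON field
    df = df[['title', 'rank', 'rank_last_week', 'weeks_on_list',
             'description', 'publisher', 'author']]
    df.insert(0, 'date', date_iso)                          # keep the list date with each row
    return df

# e.g. fetch_week("2019-10-20"), or
#      books_df = pd.concat([fetch_week(w) for w in weeks], ignore_index=True)
```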
``` import nltk from nltk.corpus import stopwords import pandas as pd import numpy as np import re from keras.preprocessing.sequence import pad_sequences from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout, Embedding data = pd.read_csv('Dataset/hotel_data.csv') data.head(5) data.city.value_counts() array = ['Mumbai'] data = data.loc[data['city'].isin(array)] data.head(5) data = data.hotel_overview data = data.dropna() stop = set(stopwords.words('english')) def stopwords_removal(data_point): data = [x for x in data_point.split() if x not in stop] return data def clean_data(data): cleaned_data = [] all_unique_words_in_each_description = [] for entry in data: entry = re.sub(pattern='[^a-zA-Z]',repl=' ',string = entry) entry = re.sub(r'\b\w{0,1}\b', repl=' ',string = entry) entry = entry.lower() entry = stopwords_removal(entry) cleaned_data.append(entry) unique = list(set(entry)) all_unique_words_in_each_description.extend(unique) return cleaned_data, all_unique_words_in_each_description def unique_words(data): unique_words = set(all_unique_words_in_each_description) return unique_words, len(unique_words) cleaned_data, all_unique_words_in_each_description = clean_data(data) unique_words, length_of_unique_words = unique_words(all_unique_words_in_each_description) cleaned_data[0] length_of_unique_words def build_indices(unique_words): word_to_idx = {} idx_to_word = {} for i, word in enumerate(unique_words): word_to_idx[word] = i idx_to_word[i] = word return word_to_idx, idx_to_word word_to_idx, idx_to_word = build_indices(unique_words) def prepare_corpus(corpus, word_to_idx): sequences = [] for line in corpus: tokens = line for i in range(1, len(tokens)): i_gram_sequence = tokens[:i+1] i_gram_sequence_ids = [] for j, token in enumerate(i_gram_sequence): i_gram_sequence_ids.append(word_to_idx[token]) sequences.append(i_gram_sequence_ids) return sequences sequences = prepare_corpus(cleaned_data, word_to_idx) max_sequence_len = max([len(x) for x in sequences]) print(sequences[0]) print(sequences[1]) print(idx_to_word[1647]) print(idx_to_word[867]) print(idx_to_word[1452]) len(sequences) max_sequence_len def build_input_data(sequences, max_sequence_len, length_of_unique_words): sequences = np.array(pad_sequences(sequences, maxlen = max_sequence_len, padding = 'pre')) X = sequences[:,:-1] y = sequences[:,-1] y = np_utils.to_categorical(y, length_of_unique_words) return X, y X, y = build_input_data(sequences, max_sequence_len, length_of_unique_words) def create_model(max_sequence_len, length_of_unique_words): model = Sequential() model.add(Embedding(length_of_unique_words, 10, input_length=max_sequence_len - 1)) model.add(LSTM(128)) model.add(Dropout(0.2)) model.add(Dense(length_of_unique_words, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') return model model = create_model(max_sequence_len, length_of_unique_words) model.summary() len(X) model.fit(X, y, batch_size = 512, epochs=100) def generate_text(seed_text, next_words, model, max_seq_len): for _ in range(next_words): cleaned_data = clean_data([seed_text]) sequences= prepare_corpus(cleaned_data[0], word_to_idx) sequences = pad_sequences([sequences[-1]], maxlen=max_seq_len-1, padding='pre') predicted = model.predict_classes(sequences, verbose=0) output_word = '' output_word = idx_to_word[predicted[0]] seed_text = seed_text + " " + output_word return seed_text.title() print(generate_text("in Mumbai there we need", 30, model, max_sequence_len)) 
print(generate_text("Best Hotel Mumbai", 30, model, max_sequence_len)) print(generate_text("The beauty of the city", 30, model, max_sequence_len)) model_structure = model.to_json() with open("Output Files/text_generation_using_LSTM.json", "w") as json_file: json_file.write(model_structure) model.save_weights("Output Files/text_generation_using_LSTM.h5") ```
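To pick the trained model back up in a later session, the saved architecture and weights can be reloaded. A minimal sketch, assuming the same `Output Files/` paths written above and the same Keras version used in this notebook:

```
# Sketch: restore the saved architecture and weights
from keras.models import model_from_json

with open("Output Files/text_generation_using_LSTM.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())                     # rebuild the layer graph
loaded_model.load_weights("Output Files/text_generation_using_LSTM.h5")  # restore trained weights

# recompile only if further training is intended; prediction works as-is
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam')
```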
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/sprinkler_pgm.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Directed graphical models We illustrate some basic properties of DGMs. ``` try: from causalgraphicalmodels import CausalGraphicalModel except ModuleNotFoundError: %pip install causalgraphicalmodels from causalgraphicalmodels import CausalGraphicalModel try: import pgmpy except ModuleNotFoundError: %pip install pgmpy import pgmpy import numpy as np import pandas as pd ``` # Make the model ``` sprinkler = CausalGraphicalModel( nodes=["cloudy", "rain", "sprinkler", "wet", "slippery"], edges=[("cloudy", "rain"), ("cloudy", "sprinkler"), ("rain", "wet"), ("sprinkler", "wet"), ("wet", "slippery")], ) ``` # Draw the model ``` # draw return a graphviz `dot` object, which jupyter can render out = sprinkler.draw() type(out) display(out) out.render() ``` # Display the factorization ``` print(sprinkler.get_distribution()) ``` # D-separation ``` # check for d-seperation of two nodes sprinkler.is_d_separated("slippery", "cloudy", {"wet"}) ``` # Extract CI relationships ``` # get all the conditional independence relationships implied by a CGM CI = sprinkler.get_all_independence_relationships() print(CI) records = [] for ci in CI: record = (ci[0], ci[1], ", ".join(x for x in ci[2])) records.append(record) print(records) df = pd.DataFrame(records, columns=("X", "Y", "Z")) display(df) print(df.to_latex(index=False)) ``` # Parameterize the model ``` try: from pgmpy.models import BayesianModel except ModuleNotFoundError: %pip install pgmpy from pgmpy.models import BayesianModel from pgmpy.factors.discrete import TabularCPD # Defining the model structure. We can define the network by just passing a list of edges. model = BayesianModel([("C", "S"), ("C", "R"), ("S", "W"), ("R", "W"), ("W", "L")]) # Defining individual CPDs. cpd_c = TabularCPD(variable="C", variable_card=2, values=np.reshape([0.5, 0.5], (2, 1))) # In pgmpy the columns are the evidences and rows are the states of the variable. cpd_s = TabularCPD(variable="S", variable_card=2, values=[[0.5, 0.9], [0.5, 0.1]], evidence=["C"], evidence_card=[2]) cpd_r = TabularCPD(variable="R", variable_card=2, values=[[0.8, 0.2], [0.2, 0.8]], evidence=["C"], evidence_card=[2]) cpd_w = TabularCPD( variable="W", variable_card=2, values=[[1.0, 0.1, 0.1, 0.01], [0.0, 0.9, 0.9, 0.99]], evidence=["S", "R"], evidence_card=[2, 2], ) cpd_l = TabularCPD(variable="L", variable_card=2, values=[[0.9, 0.1], [0.1, 0.9]], evidence=["W"], evidence_card=[2]) # Associating the CPDs with the network model.add_cpds(cpd_c, cpd_s, cpd_r, cpd_w, cpd_l) # check_model checks for the network structure and CPDs and verifies that the CPDs are correctly # defined and sum to 1. model.check_model() ``` # Inference ``` try: from pgmpy.inference import VariableElimination except ModuleNotFoundError: %pip install pgmpy from pgmpy.inference import VariableElimination infer = VariableElimination(model) # p(R=1)= 0.5*0.2 + 0.5*0.8 = 0.5 probs = infer.query(["R"]).values print("\np(R=1) = ", probs[1]) # P(R=1|W=1) = 0.7079 probs = infer.query(["R"], evidence={"W": 1}).values print("\np(R=1|W=1) = ", probs[1]) # P(R=1|W=1,S=1) = 0.3204 probs = infer.query(["R"], evidence={"W": 1, "S": 1}).values print("\np(R=1|W=1,S=1) = ", probs[1]) ```
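Since the network is tiny, the inference results above can also be sanity-checked by brute force: summing products of the same CPT entries over all joint states. The sketch below hard-codes the tables exactly as defined above, with the conditional columns of W indexed in the order (S,R) = (0,0),(0,1),(1,0),(1,1), and recovers p(R=1|W=1) = 0.7079.

```
# Sketch: brute-force enumeration of the joint distribution as a cross-check
from itertools import product

P_C = [0.5, 0.5]
P_S_given_C = [[0.5, 0.9], [0.5, 0.1]]      # P_S_given_C[s][c]
P_R_given_C = [[0.8, 0.2], [0.2, 0.8]]      # P_R_given_C[r][c]
P_W_given_SR = [[1.0, 0.1, 0.1, 0.01],      # P_W_given_SR[w][2*s + r]
                [0.0, 0.9, 0.9, 0.99]]

num = den = 0.0
for c, s, r, w in product([0, 1], repeat=4):
    p = P_C[c] * P_S_given_C[s][c] * P_R_given_C[r][c] * P_W_given_SR[w][2 * s + r]
    if w == 1:
        den += p                            # accumulate p(W=1)
        if r == 1:
            num += p                        # accumulate p(R=1, W=1)

print(num / den)                            # ~0.7079, matching VariableElimination
```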
# Nonexistence of a distance-regular graph with intersection array $\{(2r+1)(4r+1)(4t-1), 8r(4rt-r+2t), (r+t)(4r+1); 1, (r+t)(4r+1), 4r(2r+1)(4t-1)\}$ We will show that a distance-regular graph with intersection array $\{(2r+1)(4r+1)(4t-1), 8r(4rt-r+2t), (r+t)(4r+1); 1, (r+t)(4r+1), 4r(2r+1)(4t-1)\}$, where $r, t \ge 1$, does not exist. The intersection array arises for graphs of diameter $3$ with $b_2 = c_2$ and $p^3_{33}$ even which are $Q$-polynomial with respect to the natural ordering of eigenvalues and contain a maximal $1$-code that is both locally regular and last subconstituent perfect. See [Extremal $1$-codes in distance-regular graphs of diameter $3$](http://link.springer.com/article/10.1007/s10623-012-9651-0) by A. Jurišić and J. Vidali, where the theory for dealing with such intersection arrays has been developed. ``` %display latex import drg ``` This family is not entirely feasible, however, we will find two infinite feasible subfamilies. ``` r, t = var("r t") p = drg.DRGParameters([(2*r+1)*(4*r+1)*(4*t-1), 8*r*(4*r*t-r+2*t), (r+t)*(4*r+1)], [1, (r+t)*(4*r+1), 4*r*(2*r+1)*(4*t-1)]) p.order(expand=True, factor=True) ``` The two feasible subfamilies can be obtained by setting $t = 4r^2$ and $t = 2r(2r+1)$, respectively. ``` pA = p.subs(t == 4*r^2) pA.intersectionArray(expand=True, factor=True) show(pA) show(pA.order(expand=True, factor=True)) pB = p.subs(t == 2*r*(2*r+1)) pB.intersectionArray(expand=True, factor=True) show(pB) show(pB.order(expand=True, factor=True)) ``` Let us check that the first members of each family are indeed feasible. We skip the family nonexistence check since the intersection array of the entire family is already included. ``` pA1 = pA.subs(r == 1) show(pA1) show(pA1.order()) pA1.check_feasible(skip=["family"]) pB1 = pB.subs(r == 1) show(pB1) show(pB1.order()) pB1.check_feasible(skip=["family"]) ``` We now compute the Krein parameters. We have $q^1_{13} = q^1_{31} = q^3_{11} = 0$, so the graph would be $Q$-polynomial with respect to the natural ordering of the eigenvalues. ``` p.set_vars([t, r]) [p.q[1, 1, 3], p.q[1, 3, 1], p.q[3, 1, 1]] ``` We now compute the triple intersection numbers with respect to three vertices $u, v, w$ at mutual distances $3$. Let us first check that $p^3_{33}$ is positive. ``` p.p[3, 3, 3].factor().simplify_full() ``` The parameter $\alpha$ will denote the number of vertices at distance $3$ from all of $u, v, w$. Let us count the number of vertices at distance $1$ or $2$ from one of $u, v, w$ and $3$ from the other two vertices. ``` alpha = var("alpha") S333 = p.tripleEquations(3, 3, 3, params={alpha: (3, 3, 3)}) [S333[s].expand().factor() for s in [(1, 3, 3), (3, 1, 3), (3, 3, 1), (2, 3, 3), (3, 2, 3), (3, 3, 2)]] ``` Note that for the above expressions to be nonnegative, we must have $a = 4r - 1$, and then they are all equal to zero. Consequently, all of the $a_3$ vertices adjacent to one of $u, v, w$ which are at distance $3$ from another of these vertices are at distance $2$ from the remaining vertex in the triple. ``` S333a = S333.subs(alpha == 4*r - 1) show(p.a[3].expand().factor()) [S333a[s].expand().factor() for s in [(1, 2, 3), (3, 1, 2), (2, 3, 1), (2, 1, 3), (3, 2, 1), (1, 3, 2)]] ``` The above results mean that any two vertices $v, w$ at distance $3$ uniquely define a set $C$ of $4r + 2$ vertices mutually at distance $3$ containing $v, w$ - i.e., a $1$-code in the graph. Furthermore, since $a_3$ is nonzero, for each $u$ in $C \setminus \{v, w\}$, there are vertices at distances $3, 1, 2$ from $u, v, w$. 
We now check that $c_3 = a_3 p^3_{33}$. ``` show(p.c[3].expand().factor()) show((p.a[3] * p.p[3, 3, 3]).expand().factor()) ``` Let $u'$ be a neighbour of $v$. Since $u'$ is not in $C$, it may be at distance $3$ from at most one vertex of $C$. As there are $c_3$ and $a_3$ neighbours of $v$ that are at distances $2$ and $3$ from $w$, respectively, the above equality implies that each neighbour of $v$ is at distance $3$ from precisely one vertex of $C$. Suppose now that $u'$ is at distance 2 from $w$. Let us count the number of vertices at distances $1, 1, 3$ from $u', v, w$. ``` beta = var("beta") S123 = p.tripleEquations(1, 2, 3, params={beta: (3, 3, 3)}).subs(beta == 1) S123[1, 1, 3] ``` As this value is nonintegral, we conclude that a graph with intersection array $\{(2r+1)(4r+1)(4t-1), 8r(4rt-r+2t), (r+t)(4r+1); 1, (r+t)(4r+1), 4r(2r+1)(4t-1)\}$ and $r, t \ge 1$ **does not exist**.
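For completeness, the identity checked above can also be verified by hand from the intersection array: a sketch of the computation, using the standard relation $a_3 = b_0 - c_3$ for a graph of diameter $3$ and the code size $|C| = 4r+2$ found above (so that $p^3_{33} = |C| - 2 = 4r$):

$$
a_3 = b_0 - c_3 = (2r+1)(4r+1)(4t-1) - 4r(2r+1)(4t-1) = (2r+1)(4t-1),
$$

$$
a_3 \, p^3_{33} = (2r+1)(4t-1) \cdot 4r = 4r(2r+1)(4t-1) = c_3 .
$$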
``` import argparse from pylab import cm # %load ../scripts/colorcif.py ''' A small CLI script to genereate high-quality images from cif files with symmetry non-unique atoms colored with different colors. ''' import argparse import os import numpy as np from pylab import get_cmap, cm import ase.io from ase.data.colors import jmol_colors WHITE = (1.000, 1.000, 1.000) LGRAY = (0.800, 0.800, 0.800) COLORMAPS = [m for m in cm.datad.keys() if not m.endswith("_r")] def hsv2rgb(h, s, v): """http://en.wikipedia.org/wiki/HSL_and_HSV h (hue) in [0, 360[ s (saturation) in [0, 1] v (value) in [0, 1] return rgb in range [0, 1] """ if v == 0: return 0, 0, 0 if s == 0: return v, v, v i, f = divmod(h / 60., 1) p = v * (1 - s) q = v * (1 - s * f) t = v * (1 - s * (1 - f)) if i == 0: return v, t, p elif i == 1: return q, v, p elif i == 2: return p, v, t elif i == 3: return p, q, v elif i == 4: return t, p, v elif i == 5: return v, p, q else: raise RuntimeError('h must be in [0, 360]') def hsv(array, s=.9, v=.9): array = (array - array.min()) * 359. / (array.max() - array.min()) result = np.empty((len(array.flat), 3)) for rgb, h in zip(result, array.flat): rgb[:] = hsv2rgb(h, s, v) return np.reshape(result, array.shape + (3,)) def get_colors(cmap, array): ''' Get `numc` from a matplotlib colormap `cmap` ''' cm = get_cmap(cmap) grid = (array - array.min())*1.0/(array.max() - array.min()) result = np.zeros((len(array), 3)) for rgb, x in zip(result, grid): rgb[:] = cm(x)[:3] return result def parse_arguments(arguments=None): parser = argparse.ArgumentParser() parser.add_argument("cif", help="cif file") parser.add_argument("-t", "--texture", choices=['jmol', 'glass', 'ase3', 'vmd'], default="jmol") parser.add_argument("-T", action="store_true", help="highlight only different T-atoms") parser.add_argument("-O", action="store_true", help="highlight only atoms that are NOT T-atoms") parser.add_argument("-c", "--colormap", choices=COLORMAPS, default=None, help="matplotlib colormap see: http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps") parser.add_argument("-o", "--output", help="name of the output file", default=None) parser.add_argument("-x", default="0", help="angle of rotation around the x axis") parser.add_argument("-y", default="0", help="angle of rotation around the y axis") parser.add_argument("-z", default="0", help="angle of rotation around the z axis") if arguments is not None: return parser.parse_args(arguments) else: return parser.parse_args() def generate_image(args): if args.output is None: args.output = os.path.splitext(args.cif)[0] + '.pov' mol = ase.io.read(args.cif) sg = mol.info['spacegroup'] mol.set_tags(sg.tag_sites(mol.get_scaled_positions())) # found using ase-gui menu 'view -> rotate' rotation = '{x}x, {y}y, {z}z'.format(x=args.x, y=args.y, z=args.z) natoms = np.shape(mol.get_positions())[0] # set which atoms to color and how colors = np.zeros((natoms, 3)) if not (args.T or args.O): if args.colormap: colors = get_colors(args.colormap, mol.get_tags()) else: colors = hsv(mol.get_tags()) else: # create a mask to select the T atoms and the rest Tmask = mol.get_atomic_numbers() != 8 notTmask = np.logical_not(Tmask) if args.T: tags = mol.get_tags()[Tmask] if args.colormap: colors[Tmask] = get_colors(args.colormap, tags) else: colors[Tmask] = hsv(tags) # set the color of other atoms to gray colors[notTmask] = np.tile(np.asarray(LGRAY), (notTmask.sum(), 1)) # default ase atom colors from jmol #colors[notTmask] = jmol_colors[mol.get_atomic_numbers()[notTmask]] elif args.O: tags = 
mol.get_tags()[notTmask] if args.colormap: colors[notTmask] = get_colors(args.colormap, tags) else: colors[notTmask] = hsv(tags) # set the color of other atoms to gray colors[Tmask] = np.tile(np.asarray(LGRAY), (Tmask.sum(), 1)) # default ase atom colors from jmol #colors[Tmask] = jmol_colors[mol.get_atomic_numbers()[Tmask]] # Textures tex = [args.texture,] * natoms # keyword options for eps, png and pov files kwargs = { 'rotation': rotation, 'show_unit_cell': 0, 'colors': colors, 'radii': None, } # keyword options for povray files only extra_kwargs = { 'display' : False, # Display while rendering 'pause' : False, # Pause when done rendering (only if display) 'transparent' : False, # Transparent background 'canvas_width' : 400, # Width of canvas in pixels 'canvas_height': None, # Height of canvas in pixels 'camera_dist' : 50., # Distance from camera to front atom 'image_plane' : None, # Distance from front atom to image plane # (focal depth for perspective) 'camera_type' : 'perspective', # perspective, ultra_wide_angle 'point_lights' : [], # [[loc1, color1], [loc2, color2],...] 'area_light' : [(2., 3., 40.) ,# location 'White', # color .7, .7, 3, 3], # width, height, Nlamps_x, Nlamps_y 'background' : 'White', # color 'textures' : tex, # Length of atoms list of texture names 'celllinewidth': 0.05, # Radius of the cylinders representing the cell } # Make the color of the glass beads semi-transparent #colors2 = np.zeros((natoms, 4)) #colors2[:, :3] = colors #colors2[:, 3] = 0.95 kwargs['colors'] = colors kwargs.update(extra_kwargs) # Make the raytraced image ase.io.write(args.output, mol, run_povray=True, **kwargs) def main(): args = parse_arguments() generate_image(args) for cmap in COLORMAPS[:6]: args = parse_arguments(["-T", "--output=ton_{}_t.pov".format(cmap), "--colormap={}".format(cmap), "TON.cif"]) generate_image(args) from IPython.display import Image, HTML, display from glob import glob imagesList=''.join( ["<img style='width: 150px; margin: 0px; float: left; border: 1px solid black;' src='%s' />" % str(s) for s in sorted(glob('ton_*_t.png')) ]) display(HTML(imagesList)) ```
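To get a feel for what the two colouring helpers above actually return, the short sketch below pushes a toy array of site tags through both `hsv` and `get_colors` (it assumes the cells defining those functions have been run; `'viridis'` is just an example colormap name).

```
# Sketch: inspect the RGB rows produced for a small set of symmetry tags
import numpy as np

tags = np.array([0, 0, 1, 2, 2, 3])       # toy tag_sites-style labels
print(hsv(tags))                           # one RGB triple per atom, hues spread over the tags
print(get_colors('viridis', tags))         # same idea, sampled from a matplotlib colormap
```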
# Optical Flow Optical flow tracks objects by looking at where the *same* points have moved from one image frame to the next. Let's load in a few example frames of a pacman-like face moving to the right and down and see how optical flow finds **motion vectors** that describe the motion of the face! As usual, let's first import our resources and read in the images. ``` import numpy as np import matplotlib.image as mpimg import matplotlib.pyplot as plt import cv2 %matplotlib inline frame_1 = cv2.imread("pacman_1.png") frame_2 = cv2.imread("pacman_2.png") frame_3 = cv2.imread("pacman_3.png") frame_1 = cv2.cvtColor(frame_1,cv2.COLOR_BGR2RGB) frame_2 = cv2.cvtColor(frame_2,cv2.COLOR_BGR2RGB) frame_3 = cv2.cvtColor(frame_3,cv2.COLOR_BGR2RGB) f,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(20,10)) ax1.set_title("Frame 1") ax1.imshow(frame_1) ax2.set_title("Frame 2") ax2.imshow(frame_2) ax3.set_title("Frame 3") ax3.imshow(frame_3) ``` ## Finding Points to Track Before optical flow can work, we have to give it a set of *keypoints* to track between two image frames! In the below example, we use a **Shi-Tomasi corner detector**, which uses the same process as a Harris corner detector to find patterns of intensity that make up a "corner" in an image, only it adds an additional parameter that helps select the most prominent corners. You can read more about this detection algorithm in [the documentation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html). Alternatively, you could choose to use Harris or even ORB to find feature points. I just found that this works well. **You should see that the detected points appear at the corners of the face.** ``` #parameters features_params= dict ( maxCorners = 10, qualityLevel= 0.2, minDistance = 5, blockSize = 5) gray_1 = cv2.cvtColor(frame_1,cv2.COLOR_RGB2GRAY) gray_2 = cv2.cvtColor(frame_2,cv2.COLOR_RGB2GRAY) gray_3 = cv2.cvtColor(frame_3,cv2.COLOR_RGB2GRAY) pts_1 = cv2.goodFeaturesToTrack(gray_1,mask=None,**features_params) plt.imshow(frame_1) for p in pts_1: plt.plot(p[0][0],p[0][1],'r.',markersize=15) print(pts_1) ``` ## Perform Optical Flow Once we've detected keypoints on our initial image of interest, we can calculate the optical flow between this image frame (frame 1) and the next frame (frame 2), using OpenCV's `calcOpticalFlowPyrLK` which is [documented here](https://docs.opencv.org/trunk/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323). It takes in an initial image frame, the next image, and the first set of points, and it returns the detected points in the next frame and a value that indicates how good the matches are between points from one frame to the next. The parameters also include a window size and maxLevel, which indicate the size of a window and the number of levels that will be used to scale the given images using pyramid scaling; this version performs an iterative search for matching points, and this matching criterion is reflected in the last parameter (you may need to change these values if you are working with a different image, but these should work for the provided example). 
``` lr_params = dict(winSize = (5,5), maxLevel=2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) pts_2,match,err = cv2.calcOpticalFlowPyrLK(frame_1,frame_2,pts_1,None,**lr_params) good_new = pts_2[match==1] good_old = pts_1[match==1] mask = np.zeros_like(frame_2) for i,(new,old) in enumerate(zip(good_new,good_old)): (a,b) = new.ravel() (c,d) = old.ravel() mask=cv2.circle(mask,(a,b),5,(0,255,0),-1) mask = cv2.line(mask,(a,b),(c,d),(0,255,0),3) comp_image = np.copy(frame_2) comp_image[mask!=0]=[0] plt.imshow(comp_image) ``` ### TODO: Perform Optical Flow between image frames 2 and 3 Repeat this process but for the last two image frames; see what the resulting motion vectors look like. Imagine doing this for a series of image frames and plotting the entire-motion-path of a given object. ``` pts_2 = cv2.goodFeaturesToTrack(gray_2,mask=None,**features_params) plt.imshow(frame_2) for p in pts_2: plt.plot(p[0][0],p[0][1],'r.',markersize=15) pts_2 pts_3,match,err = cv2.calcOpticalFlowPyrLK(frame_2,frame_3,pts_2,None,**lr_params) good_new1 = pts_3[match==1] good_old1 = pts_2[match==1] mask1 = np.zeros_like(frame_3) for i,(new,old) in enumerate(zip(good_new1,good_old1)): (e,f) = new.ravel() (g,h) = old.ravel() mask1 = cv2.circle(mask1,(e,f),5,(0,255,0),-1) mask1 = cv2.line(mask1,(e,f),(g,h),(0,255,0),3) comp_image1 = np.copy(frame_3) comp_image1[mask1!=0] = [0] plt.imshow(comp_image1) plt.imshow(mask1) ```
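Following the closing suggestion above about a longer series of frames, here is a minimal sketch of how the same two calls could be chained over a list of frames to accumulate full motion paths; `frames` is a hypothetical list of RGB images, `features_params` / `lr_params` are reused from the cells above, and failed matches are not filtered out, to keep the sketch short.

```
# Sketch: chain optical flow across a frame sequence and collect the motion paths
import numpy as np
import cv2

def track_paths(frames):
    gray0 = cv2.cvtColor(frames[0], cv2.COLOR_RGB2GRAY)
    pts = cv2.goodFeaturesToTrack(gray0, mask=None, **features_params)
    paths = [pts.reshape(-1, 2)]                    # one (N, 2) array of positions per frame
    for prev, nxt in zip(frames[:-1], frames[1:]):
        pts, match, err = cv2.calcOpticalFlowPyrLK(prev, nxt, pts, None, **lr_params)
        paths.append(pts.reshape(-1, 2))
    return np.stack(paths)                          # shape (num_frames, N, 2)

# e.g. paths = track_paths([frame_1, frame_2, frame_3])
#      plt.plot(paths[:, 0, 0], paths[:, 0, 1]) draws the path of the first keypoint
```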
# Implementing the Gradient Descent Algorithm In this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data. ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd #Some helper functions for plotting and drawing lines def plot_points(X, y): admitted = X[np.argwhere(y==1)] rejected = X[np.argwhere(y==0)] plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k') plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k') def display(m, b, color='g--'): plt.xlim(-0.05,1.05) plt.ylim(-0.05,1.05) x = np.arange(-10, 10, 0.1) plt.plot(x, m*x+b, color) ``` ## Reading and plotting the data ``` data = pd.read_csv('data.csv', header=None) X = np.array(data[[0,1]]) y = np.array(data[2]) plot_points(X,y) plt.show() ``` ## TODO: Implementing the basic functions Here is your turn to shine. Implement the following formulas, as explained in the text. - Sigmoid activation function $$\sigma(x) = \frac{1}{1+e^{-x}}$$ - Output (prediction) formula $$\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$$ - Error function $$Error(y, \hat{y}) = - y \log(\hat{y}) - (1-y) \log(1-\hat{y})$$ - The function that updates the weights $$ w_i \longrightarrow w_i + \alpha (y - \hat{y}) x_i$$ $$ b \longrightarrow b + \alpha (y - \hat{y})$$ ``` # Implement the following functions # Activation (sigmoid) function def sigmoid(x): return(1/(1+np.exp(-x))) # Output (prediction) formula def output_formula(features, weights, bias): return sigmoid(np.dot(features, weights) + bias) # Error (log-loss) formula def error_formula(y, output): return -y*np.log(output)-(1-y)*np.log(1-output) # Gradient descent step def update_weights(x, y, weights, bias, learnrate): output = output_formula(x, weights, bias) d_error = y - output weights += learnrate * d_error * x bias += learnrate * d_error return weights, bias ``` ## Training function This function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. It will also plot the data, and some of the boundary lines obtained as we run the algorithm. 
``` np.random.seed(44) epochs = 100 learnrate = 0.01 def train(features, targets, epochs, learnrate, graph_lines=False): errors = [] n_records, n_features = features.shape last_loss = None weights = np.random.normal(scale=1 / n_features**.5, size=n_features) bias = 0 for e in range(epochs): del_w = np.zeros(weights.shape) for x, y in zip(features, targets): output = output_formula(x, weights, bias) error = error_formula(y, output) weights, bias = update_weights(x, y, weights, bias, learnrate) # Printing out the log-loss error on the training set out = output_formula(features, weights, bias) loss = np.mean(error_formula(targets, out)) errors.append(loss) if e % (epochs / 10) == 0: print("\n========== Epoch", e,"==========") if last_loss and last_loss < loss: print("Train loss: ", loss, " WARNING - Loss Increasing") else: print("Train loss: ", loss) last_loss = loss predictions = out > 0.5 accuracy = np.mean(predictions == targets) print("Accuracy: ", accuracy) if graph_lines and e % (epochs / 100) == 0: display(-weights[0]/weights[1], -bias/weights[1]) # Plotting the solution boundary plt.title("Solution boundary") display(-weights[0]/weights[1], -bias/weights[1], 'black') # Plotting the data plot_points(features, targets) plt.show() # Plotting the error plt.title("Error Plot") plt.xlabel('Number of epochs') plt.ylabel('Error') plt.plot(errors) plt.show() ``` ## Time to train the algorithm! When we run the function, we'll obtain the following: - 10 updates with the current training loss and accuracy - A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs. - A plot of the error function. Notice how it decreases as we go through more epochs. ``` train(X, y, epochs, learnrate, True) data = pd.read_csv('data.csv', header=None) X = np.array(data[[0,1]]) y = np.array(data[2]) plot_points(X,y) plt.show() ```
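The per-sample update rules implemented above can also be written in a vectorized, full-batch form, which is a convenient way to double-check the formulas; the sketch below reuses `output_formula` and the `X`, `y` arrays from this notebook, and averaging the per-sample updates over the batch is a design choice of this sketch rather than what the training loop above does.

```
# Sketch: one full-batch gradient descent step, vectorized with numpy
import numpy as np

def batch_step(X, y, weights, bias, learnrate=0.01):
    y_hat = output_formula(X, weights, bias)                     # predictions for every sample at once
    error = y - y_hat                                            # (y - y_hat) term of the update rule
    weights = weights + learnrate * np.dot(X.T, error) / len(X)  # averaged per-sample weight updates
    bias = bias + learnrate * np.mean(error)                     # averaged per-sample bias update
    return weights, bias

# e.g. w, b = batch_step(X, y, np.zeros(X.shape[1]), 0.0)
```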
# The Multivariate Gaussian Distribution

A vector-valued random variable $X=\left[X_1\cdots X_n\right]^T$ with mean $\mu\in\mathbb R^n$ and covariance matrix $\varSigma\in\mathbb S_{++}^n$ (in the [linear algebra](sn01.ipynb) notes, $\mathbb S_{++}^n$ denotes the space of $n\times n$ symmetric positive definite matrices, defined as $\mathbb S_{++}^n=\left\{A\in\mathbb R^{n\times n}: A=A^T,\ \forall x\in\mathbb R^n\land x\neq0\to x^TAx\gt0\right\}$) is said to have a **multivariate normal (or Gaussian) distribution** if its probability density function (in these notes we write $p(\bullet)$ for densities, instead of the $f_X(\bullet)$ used in the [probability theory](sn02.ipynb) notes) can be written as

$$
p\left(x;\mu,\varSigma\right)=\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\exp\left(-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)\right)
$$

We write this as $X\sim\mathcal N\left(\mu,\varSigma\right)$. In these notes, we briefly discuss the basic properties of the multivariate Gaussian distribution.

## 1. Relationship to the univariate Gaussian

Recall the probability density function of a **univariate normal (or Gaussian) distribution**:

$$
p\left(x;\mu,\sigma^2\right)=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{1}{2\sigma^2}\left(x-\mu\right)^2\right)
$$

Here, the argument of the exponential, $-\frac{1}{2\sigma^2}\left(x-\mu\right)^2$, is a quadratic function of $x$ with a negative leading coefficient, i.e. a downward-opening parabola. The coefficient in front, $\frac{1}{\sqrt{2\pi}\sigma}$, is a constant that does not depend on $x$, so we can simply think of it as a "normalization factor" that guarantees $\displaystyle\frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^{\infty}\exp\left(-\frac{1}{2\sigma^2}\left(x-\mu\right)^2\right)\mathrm dx=1$.

<img src="./resource/sn07_image01.png" width="800" alt="" align=center />

The left figure shows the probability density of a univariate Gaussian over $X$; the right figure shows the probability density of a multivariate Gaussian over $X_1,X_2$.

In the multivariate case, the argument of the exponential, $-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)$, is a quadratic form in $x$. Since $\varSigma$ is positive definite (and the inverse of a positive definite matrix is again positive definite), we have $z^T\varSigma^{-1}z\gt0$ for any nonzero vector $z$. This implies that for any $x\neq\mu$,

$$
\begin{align}
\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)&\gt0\\
-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)&\lt0
\end{align}
$$

Analogously to the univariate case, we can picture the argument of the exponential as a downward-opening paraboloid. The coefficient in front, $\displaystyle\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}$, is more complicated than its univariate counterpart, but it still does not depend on $x$, so we can again regard it as a "normalization factor" that guarantees $\displaystyle\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}\cdots\int_{-\infty}^{\infty}\exp\left(-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)\right)\mathrm dx_1\mathrm dx_2\cdots\mathrm dx_n=1$.
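As a quick numerical illustration of the density $p(x;\mu,\varSigma)$ defined above (a sketch only; SciPy is assumed to be available and the particular values of $\mu$, $\varSigma$ and $x$ are arbitrary), one can evaluate the formula directly and compare it against `scipy.stats.multivariate_normal`:

```
# Sketch: evaluate p(x; mu, Sigma) from the formula and cross-check with scipy
import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([1.0, -1.0])
Sigma = np.array([[2.0, 0.5],
                  [0.5, 1.0]])                      # symmetric positive definite
x = np.array([0.5, 0.0])

n = len(mu)
diff = x - mu
p_manual = np.exp(-0.5 * diff @ np.linalg.solve(Sigma, diff)) \
           / ((2 * np.pi) ** (n / 2) * np.sqrt(np.linalg.det(Sigma)))
p_scipy = multivariate_normal(mean=mu, cov=Sigma).pdf(x)

print(p_manual, p_scipy)                            # the two values agree
```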
协方差矩阵 理解协方差矩阵的概念是掌握多元高斯分布的关键。回忆对于一对随机变量$X,Y$,其**协方差(covariance)**定义为: $$ \mathrm{Cov}[X,Y]=\mathrm E\left[(X-\mathrm E[X])(Y-\mathrm E[Y])\right]=\mathrm E[XY]-\mathrm E[X]\mathrm E[Y] $$ 在处理多个变量时,协方差矩阵提供了一种对所有“变量对”协方差的简明表达。具体的讲,协方差矩阵(通常记为$\varSigma$)是一个$n\times n$矩阵,它的第$(i,j)$个元素为$\mathrm{Cov}[X_i,X_j]$。 下列命题为我们提供了另一种关于随机变量$X$的协方差矩阵的描述(证明见附录A.1): **命题1:**对于任意一个期望为$\mu$协方差矩阵为$\varSigma$的随机变量$X$,有: $$ \varSigma=\mathrm E\left[(X-\mu)(X-\mu)^T\right]=\mathrm E\left[XX^T\right]-\mu\mu^T\tag{1} $$ 在多元高斯分布的定义中,我们要求协方差矩阵$\varSigma$是一个对称正定矩阵(即$\varSigma\in\mathbb S_{++}^n$)。为什么需要这样的约束条件?在下面的命题中可以看到,*任意*随机向量的协方差矩阵必须是对称半正定矩阵: **命题2:**假设$\varSigma$是某个随机向量$X$的协方差矩阵,则$\varSigma$一定是对称半正定矩阵。 **证明:**从$\varSigma$的定义中可以直接看到起对称性($\varSigma=\varSigma^T$),接下来我们证明它是半正定矩阵,对于任意$z\in\mathbb R^n$: $$ \begin{align} z^T\varSigma z&=\sum_{i=1}^n\sum_{j=1}^n\left(\varSigma_{ij}z_iz_j\right)\tag{2}\\ &=\sum_{i=1}^n\sum_{j=1}^n\left(\mathrm{Cov}\left[X_i,X_j\right]\cdot z_iz_j\right)\\ &=\sum_{i=1}^n\sum_{j=1}^n\left(\mathrm E\left[\left(X_i-\mathrm E\left[X_i\right]\right)\left(X_j-\mathrm E\left[X_j\right]\right)\right]\cdot z_iz_j\right)\\ &=\mathrm E\left[\sum_{i=1}^n\sum_{j=1}^n\left(X_i-\mathrm E\left[X_i\right]\right)\left(X_j-\mathrm E\left[X_j\right]\cdot z_iz_j\right)\right]\tag{3} \end{align} $$ $(2)$式就是二次型展开后的样子(见[线性代数](sn01.ipynb)笔记),$(3)$式利用了期望的线性性质(见[概率论](sn02.ipynb)笔记)。 为了完成证明,现在观察中括号内的项形为$\sum_i\sum_jx_ix_jz_iz_j=\left(x^Tz\right)^2\geq0$(参考[问题集1](cs229.stanford.edu/materials/ps1.pdf))。因此,期望括号内的这个量总是非负的,则期望本身总是非负的,即$z^T\varSigma z\geq0$。 上面的命题证明了一个合法的协方差矩阵$\varSigma$总是对称半正定的。而为了使$\varSigma^{-1}$存在(出现在多元高斯分布定义式中),则$\varSigma$必须是可逆的(即满秩)。由于任意满秩的对称半正定矩阵必定是对称正定矩阵,则有$\varSigma$一定是对称正定矩阵。 ## 3. 对角协方差矩阵的情形 为了对多元高斯分布有一个直观认识,我们来看一个简单的例子,$n=2$且协方差矩阵$\varSigma$为对角矩阵时: $$ x=\begin{bmatrix}x_1\\x_2\end{bmatrix}\qquad\mu=\begin{bmatrix}\mu_1\\\mu_2\end{bmatrix}\qquad\varSigma=\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix} $$ 在这样的条件下,多元高斯分布的概率密度为: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\begin{vmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{vmatrix}^{1/2}}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix}^{-1}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}\right)\\ &=\frac{1}{2\pi\left(\sigma_1^2\cdot\sigma_2^2-0\cdot0\right)^{1/2}}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\frac{1}{\sigma_1^2}&0\\0&\frac{1}{\sigma_2^2}\end{bmatrix}^{-1}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}\right) \end{align} $$ 这一步使用了$2\times 2$矩阵行列式的计算式$\begin{vmatrix}a&b\\c&d\end{vmatrix}=ad-bc$;而对角矩阵求逆就是将对角线上的每个元素求导。继续我们的演算: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\frac{1}{\sigma_1^2}\left(x_1-\mu_1\right)\\\frac{1}{\sigma_2^2}\left(x_2-\mu_2\right)\end{bmatrix}\right)\\ &=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ &=\frac{1}{2\pi\sigma_1}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2\right)\cdot\frac{1}{2\pi\sigma_2}\exp\left(-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right) \end{align} $$ 很容易看出最后一个式子是两个独立的高斯分布的乘积,其中一个期望为$\mu_1$方差为$\sigma_1^2$,另一个期望为$\mu_2$方差为$\sigma_2^2$。推广到一般情况,一个期望为$\mu\in\mathbb R^n$、协方差为对角矩阵$\mathrm{diag}\left(\sigma_1^2,\sigma_2^2,\cdots,\sigma_n^2\right)$的$n$维高斯分布,与一组由$n$个相互独立的以$\mu_i$为期望、$\sigma_i^2$为方差的高斯分布组成的分布是相同的。 ## 4. 
Isocontours 另一种了解多元高斯分布概念的方法是理解它的**isocontours**。对于函数$f:\mathbb R^2\to\mathbb R$,其等值线是一个集合$\left\{x\in\mathbb R^2: f(x)=c, c\in\mathbb R\right\}$(isocontours也称作level curves。一般的函数$f:\mathbb R^n\to\mathbb R$的**level set**是一个形为$\left\{x\in\mathbb R^2: f(x)=c, c\in\mathbb R\right\}$的集合)。 ### 4.1 Isocontours的形状 多元高斯分布的isocontours是什么样的?我们继续使用前面简单的例子,$n=1$且$\varSigma$为对角矩阵: $$ x=\begin{bmatrix}x_1\\x_2\end{bmatrix}\qquad\mu=\begin{bmatrix}\mu_1\\\mu_2\end{bmatrix}\qquad\varSigma=\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix} $$ 上一小节的最后我们得到了: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\tag{4} \end{align} $$ 现在来考虑一下$p\left(x;\mu,\varSigma\right)=c$时由平面上所有点组成的level set,其中$c\in\mathbb R$为某些常数。计算所有$x_1,x_2\in\mathbb R$: $$ \begin{align} c&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ 2\pi c\sigma_1\sigma_2&=\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ \log\left(2\pi c\sigma_1\sigma_2\right)&=-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\\ \log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)&=\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2+\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\\ 1&=\frac{\left(x_1-\mu_1\right)^2}{2\sigma_1^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)}+\frac{\left(x_2-\mu_2\right)^2}{2\sigma_2^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)} \end{align} $$ 定义$\displaystyle r_1=\sqrt{2\sigma_1^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)},\ r_2=\sqrt{2\sigma_2^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)}$,则有: $$ 1=\left(\frac{x_1-\mu_1}{r_1}\right)^2+\left(\frac{x_2-\mu_2}{r_2}\right)^2\tag{5} $$ $(5)$式就是高中解析几何讲过的长短轴与坐标轴平行的**椭圆(axis-aligned ellipse)**,其中心位于$\left(\mu_1,\mu_2\right)$,与$x_1$平行的轴长为$2r_1$,与$x_2$平行的轴长为$2r_2$。 ## 4.2 轴长 <img src="./resource/sn07_image02.png" width="800" alt="" align=center /> (左图为以$\mu=\begin{bmatrix}3\\2\end{bmatrix}$为期望、以对角矩阵$\varSigma=\begin{bmatrix}25&0\\0&9\end{bmatrix}$为协方差矩阵的概率密度函数的热力图。可以看到椭圆的中心位于$(3,2)$,椭圆的长短轴之比为$5:3$。右图为以$\mu=\begin{bmatrix}3\\2\end{bmatrix}$为期望、以对角矩阵$\varSigma=\begin{bmatrix}10&5\\5&5\end{bmatrix}$为协方差矩阵的概率密度函数的热力图,它的长短轴并不与坐标轴垂直。椭圆的中心仍在$(3,2)$,但是长短轴被某个线性变换旋转了一个角度。) 为了更好的理解level curves的形状是如何随着多元高斯分布的随机变量的改变而改变的,假设我们对$c$取高斯分布概率密度峰值$1/e$时的$r_1,r_2$感兴趣。 先观察$(4)$式的最大值,此时$x_1=\mu_1,x_2=\mu_2$,代回$(4)$式得到高斯分布概率密度的峰值为$\frac{1}{2\pi\sigma_1\sigma_2}$。 再令$c=\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)$,带入$r_1,r_2$求得: $$ \begin{eqnarray} r_1&=\sqrt{2\sigma_1^2\log\left(\frac{1}{2\pi\sigma_1\sigma_2\cdot\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)}\right)}&=\sigma_1\sqrt 2\\ r_2&=\sqrt{2\sigma_2^2\log\left(\frac{1}{2\pi\sigma_1\sigma_2\cdot\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)}\right)}&=\sigma_2\sqrt 2 \end{eqnarray} $$ 从这里可以看出,对于第$i$个维度(分量$x_i$),使其概率达到高斯分布概率密度峰值的$1/e$的相应轴长($r_i$)与该维度的相应标准差$\sigma_i$呈正比。从直觉上讲这也是对的:随机变量$x_i$的方差越小,则在该维度上高斯分布图像的峰值就越“紧凑”,于是反映在isocontours上椭圆在该维度上的轴$r_i$就越短。 ## 4.3 非对角协方差矩阵及高维情形 很明显,上面的推导依赖于$\varSigma$是对角矩阵这一假设,不过即使在非对角矩阵的情形下,推导的结论也有相似之处。推广到一般情况下,isocontours的图像不再是长短轴与坐标轴平行的椭圆了,现在的椭圆这是被**旋转**了一个角度而已。再推广到高维情形下,在$n$维环境中,只有level set的几何形态变成了$\mathbb R^n$中的椭球面而已。 ## 5. 
线性变换的解释 前几个小节我们主要关注对具有角型协方差矩阵的多元高斯分布是如何变化的。我们还发现具有对角型协方差矩阵的$n$维多元高斯分布其实可以被看做是以$\mu_i$为期望、$\sigma_i^2$为方差的$n$个相互独立的高斯分布的随机变量组成的分布。在这一小节,我们再从变量值的角度解释一下非对角协方差矩阵的情形。 这一小节的关键在于下面这个定理(证明见附录A.2中): **定理1:**令$X\sim\mathcal N\left(\mu,\varSigma\right),\ \mu\in\mathbb R^n,\ \sigma\in\mathbb S_{++}^n$,则存在矩阵$B\in\mathbb R^n$,若定义$Z=B^{-1}X(X-\mu)$,则$Z\sim\mathcal N(0,I)$。 注意到如果$Z\sim\mathcal N(0,I)$,利用第4节的知识,则$Z$可以被看做是由$n$个相互独立的标准正态分布($Z_i\sim\mathcal N(0,1)$)组成的。再进一步,如果$Z=B^{-1}(X-\mu)$,则用简单的代数就可以知道$X=BZ+\mu$。 因此,这个定理指出,任何服从多元高斯分布的随机变量$X$都能够通过一个线性变换($X=BZ+\mu$)分解为$n$个相互独立的标准正态分布。 ## 附录A.1 我们来证明$(1)$式的前一个等式 (后一个等式直接展开即可得到$\mathrm E\left[(X-\mu)(X-\mu)^T\right]=\mathrm E\left[XX^T-X\mu^T-\mu X^T+\mu\mu^T\right]=\mathrm E\left[XX^T\right]-\mathrm E[X]\mu^T-\mu\mathrm E\left[X^T\right]+\mu\mu^T=\mathrm E\left[XX^T\right]-\mu\mu^T$,注意$E[X]=\mu$,而常数的期望是常数。) $$ \begin{align} \varSigma&= \begin{bmatrix} \mathrm{Cov}[X_1,X_1]&\cdots&\mathrm{Cov}[X_1,X_n]\\ \vdots&\ddots&\vdots\\ \mathrm{Cov}[X_n,X_1]&\cdots&\mathrm{Cov}[X_n,X_n] \end{bmatrix}\\ &=\begin{bmatrix} \mathrm{E}\left[(X_1-\mu_1)^2\right]&\cdots&\mathrm{E}\left[(X_1-\mu_1)(X_n-\mu_n)\right]\\ \vdots&\ddots&\vdots\\ \mathrm{E}\left[(X_n-\mu_n)(X_1-\mu_1)\right]&\cdots&\mathrm{E}\left[(X_n-\mu_n)^2\right] \end{bmatrix}\\ &=\mathrm{E}\begin{bmatrix} (X_1-\mu_1)^2&\cdots&(X_1-\mu_1)(X_n-\mu_n)\\ \vdots&\ddots&\vdots\\ (X_n-\mu_n)(X_1-\mu_1)&\cdots&(X_n-\mu_n)^2 \end{bmatrix}\tag{6}\\ &=\mathrm E\begin{bmatrix}\begin{bmatrix}X_1-\mu_1\\\vdots\\X_n-\mu_n\end{bmatrix}\begin{bmatrix}X_1-\mu_1\cdots X_n-\mu_n\end{bmatrix}\end{bmatrix}\tag{7}\\ &=\mathrm E\left[(X-\mu)(X-\mu)^T\right] \end{align} $$ $(6)$式的根据是矩阵的期望就是对矩阵每一个元素取期望,而$(7)$式的根据是向量乘法: $$ zz^T=\begin{bmatrix}z_1\\z_2\\\vdots\\z_n\end{bmatrix}\begin{bmatrix}z_1&z_2&\cdots&z_n\end{bmatrix} =\begin{bmatrix}z_1z_1&z_1z_2&\cdots&z_1z_n\\z_2z_1&z_2z_2&\cdots&z_2z_n\\\vdots&\vdots&\ddots&\vdots\\z_nz_1&z_nz_2&\cdots&z_nz_n\end{bmatrix} $$ ## 附录A.2 **证明定理1:**令$X\sim\mathcal N\left(\mu,\varSigma\right),\ \mu\in\mathbb R^n,\ \sigma\in\mathbb S_{++}^n$,则存在矩阵$B\in\mathbb R^n$,若定义$Z=B^{-1}X(X-\mu)$,则$Z\sim\mathcal N(0,I)$。 证明分为两步:我们先要证明$\varSigma$可以被分解为$\varSigma=BB^T$的形式,其中$B$是某个可逆矩阵;之后再将随机变量$X$使用线性变换$Z=B^{-1}(X-\mu)$变为随机变量$Z$。 **第一步:分解协方差矩阵。**回忆[线性代数](sn01.ipynb)笔记中关于对称矩阵的两个性质(见“对称矩阵的特征值与特征向量”一节): 1. 任意实对称矩阵$A\in\mathbb R^{n\times n}$必定能被写成$A=U\varLambda U^T$的形式,其中$U$是一个满秩正交矩阵,每一列都来自$A$的特征向量;$\varLambda$是一个对角矩阵,对角线元素均来自$A$的特征值。 2. 
如果$A$是对称正定矩阵,则$A$的特征值均为正值。 由于协方差矩阵$\varSigma$是一个正定矩阵,则根据性质1就可以使用恰当的$U,\varLambda$将矩阵分解为$A=U\varLambda U^T$。再根据第二个性质,可以定义矩阵$\varLambda^{1/2}\in\mathbb R^{n\times n}$,该对角矩阵中对角线元素皆为原$\varLambda$对角线元素的平方根。所以有$\varLambda=\varLambda^{1/2}\left(\varLambda^{1/2}\right)^T$,那么可以将$\varSigma$进一步分解为: $$ \varSigma= U\varLambda U^T= U\varLambda^{1/2}\left(\varLambda^{1/2}\right)^TU^T= U\varLambda^{1/2}\left(U\varLambda^{1/2}\right)^T= BB^T $$ 其中$B=U\varLambda^{1/2}$。(关于$B$是可逆矩阵:很明显正交矩阵$U$是可逆的,而可逆矩阵$U$右乘满秩对角矩阵后,仅会对$U$每列的大小产生影响,并不会改变$U$的秩,得证。)于是可以得到$\varSigma^{-1}=B^{-T}B^{-1}$,将其代入多元高斯分布的概率密度函数: $$ p\left(x;\mu,\varSigma\right)=\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\exp\left(-\frac{1}{2}\left(x-\mu\right)^TB^{-T}B^{-1}\left(x-\mu\right)\right)\tag{8} $$ **第二步:改变随机变量。**定义向量形式的随机变量$Z=B^{-1}(X-\mu)$。介绍一个概率论基本公式(并没有在概率论的笔记中出现),用来描述原随机变量与变更后随机变量间的关系: * 设随机变量$X=[X_1,\cdots,X_n]^T\in\mathbb R^n$是一个向量形随机变量,其联合概率密度函数为$f_X:\mathbb R^n\to \mathbb R$。若$Z=H(X)\in\mathbb R^n$,其中$H$是一个双射可微函数,则随机变量$Z$的联合概率密度函数为$f_Z:\mathbb R^n\to\mathbb R$,其中$f_z$定义为: $$ f_Z(z)=f_X(x)\cdot \left\lvert \det\left( \begin{bmatrix} \frac{\partial x_1}{\partial z_1}&\cdots&\frac{\partial x_1}{\partial z_n}\\ \vdots&\ddots&\vdots\\ \frac{\partial x_n}{\partial z_1}&\cdots&\frac{\partial x_n}{\partial z_n} \end{bmatrix} \right) \right\rvert $$ 使用改变随机变量的公式,(此处跳过线性代数计算)可以发现随机变量$Z$具有如下的联合概率密度: $$ p_Z(z)=\frac{1}{(2\pi)^{n/2}}\exp\left(-\frac{1}{2}z^Tz\right)\tag{9} $$ 得证。
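A quick numerical illustration of Theorem 1 (a NumPy sketch, not part of the original notes): factor $\varSigma=BB^T$ through the eigendecomposition, draw $Z\sim\mathcal N(0,I)$, and check that $X=BZ+\mu$ has the desired mean and covariance. The particular $\mu$ and $\varSigma$ below reuse the example values from the heat-map figure in section 4.2.

```
import numpy as np

rng = np.random.default_rng(0)

# Example parameters taken from the figure in section 4.2
mu = np.array([3.0, 2.0])
Sigma = np.array([[10.0, 5.0],
                  [5.0, 5.0]])   # symmetric positive definite

# Factor Sigma = B B^T via Sigma = U Lambda U^T with B = U Lambda^{1/2},
# exactly as in the proof of Theorem 1.
lam, U = np.linalg.eigh(Sigma)
B = U @ np.diag(np.sqrt(lam))

# Draw Z ~ N(0, I) and map it through the linear transformation X = B Z + mu.
Z = rng.standard_normal((2, 100_000))
X = B @ Z + mu[:, None]

# The sample mean and covariance of X should be close to mu and Sigma.
print(X.mean(axis=1))   # ~ [3, 2]
print(np.cov(X))        # ~ [[10, 5], [5, 5]]
```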
## ERDDAP with erddapy example for PMEL tools - Shiptracks

***requires python 3.6*** for passing time information (pandas datetime to timestamp doesn't appear to work in 2.7)

### connecting and basic information

```
from erddapy import ERDDAP
import pandas as pd
import numpy as np

server_url = 'http://krafla.pmel.noaa.gov:8080/erddap'

e = ERDDAP(server=server_url)
```

Get only Shiptrack datafiles

```
df = pd.read_csv(e.get_search_url(response='csv', search_for='shiptrack'))
shiptracks = df['Dataset ID'].values
print(shiptracks)

kw = {
    'standard_name': 'sea_water_temperature',
    'min_lon': -180.0,
    'max_lon': -130.0,
    'min_lat': 50.0,
    'max_lat': 90.0,
    'min_time': '2017-01-10T00:00:00Z',
    'max_time': '2018-01-10T00:00:00Z',
    'cdm_data_type': 'trajectory'
}

variables = [e.get_var_by_attr(dataset_id=ship, standard_name=lambda v: v is not None)
             for ship in shiptracks]

common_variables = set(variables[0]).intersection(*variables[1:])
common_variables

constraints = {
    'longitude>=': kw['min_lon'],
    'longitude<=': kw['max_lon'],
    'latitude>=': kw['min_lat'],
    'latitude<=': kw['max_lat'],
    'time>=': kw['min_time'],
    'time<=': kw['max_time'],
}

download_url = e.get_download_url(
    dataset_id=shiptracks[0],
    protocol='tabledap',
    response='csv',
    variables=common_variables,
    constraints=constraints
)
print(download_url)

from requests.exceptions import HTTPError

dfs = {}
for ship in shiptracks:
    print(ship)
    try:
        e = ERDDAP(server=server_url,
                   dataset_id=ship,
                   protocol='tabledap',
                   response='csv',
                   variables=common_variables,
                   constraints=constraints
                   )
        dfs.update({ship: e.to_pandas(
            index_col='time',
            parse_dates=True,
            skiprows=(1,)  # units information can be dropped.
        )})
    except HTTPError:
        print('Failed to generate url {}'.format(ship))
        continue

# SCS GPX data is every second, downsample to hourly
dfh = {}
for ship, df in dfs.items():
    dfh.update({ship: df.resample('1H').mean()})

%matplotlib inline

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter


def make_map(projection=ccrs.PlateCarree()):
    fig, ax = plt.subplots(figsize=(8, 8),
                           subplot_kw=dict(projection=projection))
    if projection == ccrs.PlateCarree():
        gl = ax.gridlines(draw_labels=True)
        gl.xlabels_top = gl.ylabels_right = False
        # label gridlines with the tick formatters imported above
        gl.xformatter = LongitudeFormatter()
        gl.yformatter = LatitudeFormatter()
    return fig, ax


projection = ccrs.LambertConformal(central_longitude=-160.0)
transformation = ccrs.PlateCarree()

land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
                                        edgecolor='face',
                                        facecolor='1.0')

dx = dy = 0.5
extent = -180, -130, kw['min_lat'] + dy, kw['max_lat'] + dy
extent = [-180, -130, 50, 66]

fig, ax = make_map(projection=projection)
for ship, df in dfh.items():
    ax.plot(df['longitude'], df['latitude'], label=ship, transform=transformation)
leg = ax.legend()
ax.add_feature(land_50m)
ax.coastlines(resolution='50m')
ax.set_extent(extent)
```
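As a follow-up, the per-ship hourly frames in `dfh` can be stacked into one tidy table for quick summaries. This is only a sketch with plain pandas; it assumes the `dfh` dictionary built above and that `latitude`/`longitude` are among the common variables (as used in the plot).

```
import pandas as pd

# Combine the per-ship hourly tracks into a single tidy DataFrame.
combined = pd.concat(
    {ship: df for ship, df in dfh.items()},
    names=['ship', 'time']
).reset_index()

# Simple per-ship summary of record count and surveyed region.
summary = combined.groupby('ship').agg(
    n_hours=('time', 'size'),
    lat_min=('latitude', 'min'),
    lat_max=('latitude', 'max'),
    lon_min=('longitude', 'min'),
    lon_max=('longitude', 'max'),
)
print(summary)
```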
# 01_srnn_go_nogo In this notebook we prepare a very simple spiking RNN that is capable of performing a Go / NoGo task. ![GoNoGo.png](attachment:GoNoGo.png) This is an attempt to reproduce parts of Fig 1 from [Kim, Li, and Sejnowski, PNAS, 2019](https://www.pnas.org/content/116/45/22811.short) using Nengo. * Each network was trained to produce a positive mean population activity approaching +1 after a brief (125 msec) input pulse. * For a trial without an input pulse (i.e., NoGo trial), the networks were trained to maintain the output signal close to 0. * output(t) = W * R(t) Where W is the readout weights and R is the recurrent network. * 9 different network sizes from 10 to 400 neurons, each trained 100 times. 250 was best. * synaptic time constants from 20 to 50 ms * Trial was correct if maximum output during response window was > 0.7 for Go and < 0.3 for nogo. * trained for up to 6000 trials ## Environment Setup ``` #@title Run Environment Setup try: # See if we are running on google.colab from google.colab import files IN_COLAB = True !pip install --upgrade nengo nengo-gui nbdev git+https://github.com/neuromorphs/grill-srnn-pfc.git !jupyter serverextension enable nengo_gui.jupyter # TODO: kaggle creds for downloading data except ModuleNotFoundError: IN_COLAB = False %load_ext autoreload %autoreload 2 ``` ## Task Stimulus Encoding [See rkim35 settings](https://github.com/rkim35/spikeRNN/blob/f65f8a3a3f55fd0cb795043a464da11cec52a4f8/rate/main.py#L102-L108) ``` import numpy as np n_trials_per_condition = 3000 # x 2 go-or-nogo rng_seed = 1337 isi_dur = 0.25 cue_dur = 0.125 response_dur = 0.625 tau = 0.05 phase_durs = [isi_dur, cue_dur, response_dur] cumulative_durs = np.cumsum(phase_durs) trial_dur = cumulative_durs[-1] # ISI period provides resetting inhibition def isi(t): return (t % trial_dur) < isi_dur trial_conds = np.tile(np.arange(2), n_trials_per_condition) rng = np.random.default_rng(seed=rng_seed) rng.shuffle(trial_conds) def stim_signal(t): trial_ix = int(np.floor(t / trial_dur)) % len(trial_conds) cond_ix = trial_conds[trial_ix] if cond_ix: t_trial = t % trial_dur go_cue = t_trial >= cumulative_durs[0] and t_trial < cumulative_durs[1] return go_cue return 0 # Expected response - used if using an error-based learning rule. def expected_response(t): trial_ix = int(np.floor(t / trial_dur)) % len(trial_conds) cond_ix = trial_conds[trial_ix] if cond_ix: t_trial = t % trial_dur if t_trial >= cumulative_durs[0]: return 1 # - np.exp(-5.0 * (t_trial - cumulative_durs[0])) return 0 import matplotlib.pyplot as plt test_trials = 3 test_srate = 1000 # Hz t_vec = np.arange(0, trial_dur * test_trials, 1 / test_srate) plt.figure(figsize=(8, 5)) plt.subplot(3, 1, 1) plt.plot(t_vec, isi(t_vec)) plt.ylabel('Hold') plt.subplot(3, 1, 2) plt.plot(t_vec, [stim_signal(_) for _ in t_vec]) plt.ylabel('Stim') plt.legend(['x', 'y'], loc='upper right') plt.subplot(3, 1, 3) plt.plot(t_vec, [expected_response(_) for _ in t_vec]) plt.ylabel('Ideal') plt.tight_layout() ``` ## Model Setup TODO: * integrator: correctly parameterize its connections so it is a true integrator... but scale so that the relatively short input period (0.1) yields an integration == 1.0. * Add option to train with an error signal compared to output. ``` import nengo import numpy as np class GoNoGo(nengo.Network): def __init__(self, tau=0.035, n_neurons=1000, use_nef=True, train_trials=80): # Input Nodes # Fixation: -1 except during response period. 
self.isi = nengo.Node(isi, size_out=1, label='isi') # Stim: 1 during Cue period for Go trials. Otherwise 0. self.stim = nengo.Node(stim_signal, size_out=1, label='stim') # Ideal: Approximation of known correct answer self.ideal = nengo.Node(expected_response, label='ideal') # SNN to do the task self.ens = nengo.Ensemble(n_neurons=n_neurons, dimensions=2, label='srnn') # Stim input to ensemble dim-1. nengo.Connection(self.stim, self.ens[1], transform=1, synapse=tau) # Recurrent connection - Either uses a function or a learning rule, depending on use_nef def recurr_fun(x): return 3 * tau * x[1] + x[0], x[1] lr_type = None if use_nef else nengo.PES(learning_rate=1e-4) recurr = nengo.Connection(self.ens, self.ens, function=recurr_fun if use_nef else None, learning_rule_type=lr_type, synapse=tau) # Suppression input to ensemble neurons nengo.Connection(self.isi, self.ens.neurons, transform=[[-1]] * n_neurons) # error signal = output - answer # Error signal is 2-d for learning rule, but dim=1 error is always 0. t_train = trial_dur * train_trials self.error = nengo.Node(lambda t, x: (x[0], 0) if t <= t_train else (0, 0), size_in=1, size_out=2, label='error') nengo.Connection(self.ens[0], self.error[0]) nengo.Connection(self.ideal, self.error[0], transform=-1) if not use_nef: nengo.Connection(self.error, recurr.learning_rule) from nengo_gui.ipython import IPythonViz with nengo.Network() as inet: inet.gonogo = GoNoGo(tau=0.035, n_neurons=1000, use_nef=False, train_trials=10) IPythonViz(inet) def test_gonogo(net, sim_trials=160): sim_time = sim_trials * trial_dur with net: net.gonogo = GoNoGo(tau=0.035, n_neurons=1000, use_nef=False, train_trials=sim_trials//2) probes = { 'isi': nengo.Probe(net.gonogo.isi), 'stim': nengo.Probe(net.gonogo.stim), 'ideal': nengo.Probe(net.gonogo.ideal), 'ensemble': nengo.Probe(net.gonogo.ens), 'error': nengo.Probe(net.gonogo.error) } with nengo.Simulator(net) as sim: sim.run(sim_time) return sim, probes SIM_TRIALS = 160 net = nengo.Network() sim, probes = test_gonogo(net, sim_trials=SIM_TRIALS) t = sim.trange() xrange = [int(SIM_TRIALS//2 * 0.95) * trial_dur, int(SIM_TRIALS//2 * 1.05) * trial_dur] plt.figure(figsize=(8, 6)) plt.subplot(2, 2, 1) plt.plot(t, sim.data[probes['stim']]) plt.xlim(xrange) plt.ylabel('Stim') plt.subplot(2, 2, 3) plt.plot(t, sim.data[probes['isi']]) plt.xlim(xrange) plt.ylabel('ISI') plt.subplot(2, 2, 2) plt.plot(t, sim.data[probes['ensemble']][:, 0], label='ensemble') plt.plot(t, sim.data[probes['ideal']], label='ideal') plt.xlim(xrange) plt.axvline(SIM_TRIALS//2 * trial_dur, color='k') plt.legend() plt.subplot(2, 2, 4) plt.plot(t, sim.data[probes['error']]) plt.xlim(xrange) plt.ylabel('Error') plt.tight_layout() ```
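To connect the simulation back to the accuracy criterion quoted above (maximum output > 0.7 for Go and < 0.3 for NoGo during the response window), a trial-scoring sketch could look like the following. It is not part of the original notebook: it assumes the `sim`, `probes`, `trial_conds`, `trial_dur`, `cumulative_durs` and `SIM_TRIALS` names defined above, and Nengo's default time step of 1 ms.

```
import numpy as np

def score_trials(sim, probes, n_trials, dt=0.001):
    # Mean-population output is dimension 0 of the probed ensemble.
    out = sim.data[probes['ensemble']][:, 0]
    steps_per_trial = int(round(trial_dur / dt))
    resp_start = int(round(cumulative_durs[1] / dt))  # response window starts after the cue
    correct = []
    for i in range(n_trials):
        trial = out[i * steps_per_trial:(i + 1) * steps_per_trial]
        peak = trial[resp_start:].max()
        is_go = bool(trial_conds[i % len(trial_conds)])
        correct.append(peak > 0.7 if is_go else peak < 0.3)
    return np.mean(correct)

print("fraction of correct trials:", score_trials(sim, probes, SIM_TRIALS))
```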
## Exploring Titanic Dataset ``` # imports import numpy as np import pandas as pd import os # Adding to ignore warnings import warnings warnings.filterwarnings('ignore') ``` #### Importing Data ``` # setting data paths raw_data_path = os.path.join(os.path.pardir,'data','raw') train_data_path = os.path.join(raw_data_path,'train.csv') test_data_path = os.path.join(raw_data_path, 'test.csv') # read data train_df = pd.read_csv(train_data_path, index_col='PassengerId') test_df = pd.read_csv(test_data_path, index_col='PassengerId') type(test_df) ``` ### Basic Structure ``` train_df.info() test_df.info() test_df['Survived'] = 77 # Adding random default value to test data df = pd.concat((train_df,test_df),axis=0) df.info() df.head() df.tail() df.tail(10) df.Name df['Name'] df[['Name','Age']] # indexing use loc for label based indexing # all columns df.loc[7:10,] # selecting column range df.loc[7:10,'Pclass':'Age'] # selecting discrete columns df.loc[7:10,['Name','Sex','Age']] # indexing : use iloc for position based indexing df.iloc[7:10,4:9] male_passengers = df.loc[df.Sex =='male',:] print ('No of male passengers {0}'.format(len(male_passengers))) male_passengers_first_cls = df.loc[((df.Sex == 'male') & (df.Pclass == 1)),:] print ('No of male passengers in first class {0}'.format(len(male_passengers_first_cls))) ``` #### Sumary Statistics ``` df.describe() # numerical measures # centrality measure print('Mean : {0}'.format(df.Fare.mean())) print('Median : {0}'.format(df.Fare.median())) # dispersion measures print('Min Fare: {0}'.format(df.Fare.min())) print('Max Fare: {0}'.format(df.Fare.max())) print('Fare Range: {0}'.format(df.Fare.max()-df.Fare.min())) print('25 percentile: {0}'.format(df.Fare.quantile(.25))) print('50 percentile: {0}'.format(df.Fare.quantile(.50))) print('75 percentile: {0}'.format(df.Fare.quantile(.75))) print('Variance Fare: {0}'.format(df.Fare.var())) print('Standard Deviation Fare: {0}'.format(df.Fare.std())) %matplotlib inline df.Fare.plot(kind='box') # use include= all to get statistics of all columns df.describe(include='all') # categorical column: counts df.Sex.value_counts() # categorical column: proportions df.Sex.value_counts(normalize=True) # Applying on other columns df[df.Survived != 77].Survived.value_counts() df.Pclass.value_counts() df.Pclass.value_counts().plot(kind='bar') df.Pclass.value_counts().plot(kind='bar',rot=0,title='Classwise Passengers',color='g') ``` #### Distributions ``` df.Age.plot(kind='hist', title='Histogram for Age', color = 'b') df.Age.plot(kind='hist',bins=20, title='Histogram for Age', color = 'm') df.Age.plot(kind='kde', title='Density plot for Age',color='r') df.Fare.plot(kind='hist', bins=20,title='Histogram for fare', color='c') print('Skewness for Age : {0:.2f}'.format(df.Age.skew())) print('Skewness for Fare : {0:.2f}'.format(df.Fare.skew())) # use scatter plot for bi-variate distribution df.plot.scatter(x='Age', y='Fare', title='Scatter Plot: Age vs Fare', color = 'g', alpha = 0.1) df.plot.scatter(x='Pclass', y='Fare', title = 'Scatter Plot: Pclass vs Fare', alpha = 0.05, color='g') ``` #### Grouping and Aggregations ``` # group by df.groupby('Sex').Age.median() df.groupby(['Pclass']).Fare.median() df.groupby(['Pclass']).Age.median() df.groupby(['Pclass'])['Fare','Age'].median() df.groupby(['Pclass']).agg({'Fare':'mean','Age':'median'}) aggregations = { 'Fare': [ ('mean_fare','mean'), ('median_fare','median'), ('max_fare',max), ('min_fare',min) ], 'Age': [ ('median_age','median'), ('min_age',min), ('max_age',max), ('range_age', 
lambda x: max(x)-min(x)) ] } df.groupby(['Pclass']).agg(aggregations) df.groupby(['Pclass','Embarked']).Fare.median() ``` #### Crosstabs ``` # crosstab on Sex and Pclass pd.crosstab(df.Sex, df.Pclass) pd.crosstab(df.Sex, df.Pclass).plot(kind='bar') ``` #### Pivots ``` # pivot table df.pivot_table(index='Sex', columns='Pclass', values='Age', aggfunc='mean') df.groupby(['Sex','Pclass']).Age.mean().unstack() ```
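Two optional variations on the tables above, using the same `df` and plain pandas: proportions instead of raw counts in the crosstab, and a pivot table with more than one statistic plus row/column totals.

```
# class distribution within each sex as proportions instead of raw counts
pd.crosstab(df.Sex, df.Pclass, normalize='index')

# pivot table with two statistics and margins (totals)
df.pivot_table(index='Sex', columns='Pclass', values='Fare',
               aggfunc=['median', 'max'], margins=True)
```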
# Ex3 - Getting and Knowing your Data This time we are going to pull data directly from the internet. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. ### Step 1. Import the necessary libraries ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt ``` ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). ### Step 3. Assign it to a variable called users and use the 'user_id' as index ``` url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user' users = pd.read_table(url, index_col = 'user_id', sep = '|') ``` ### Step 4. See the first 25 entries ``` users.head(30) table = users.plot(kind = 'bar') ``` ### Step 5. See the last 10 entries ``` users.tail(10) ``` ### Step 6. What is the number of observations in the dataset? ``` print('***the number of observations =', users.shape[0], '***\n\n') users.info() #943 entries ``` ### Step 7. What is the number of columns in the dataset? ``` print('the number of columns =', users.shape[1]) ``` ### Step 8. Print the name of all the columns. ``` users.columns ``` ### Step 9. How is the dataset indexed? ``` users.index ``` ### Step 10. What is the data type of each column? ``` users.dtypes ``` ### Step 11. Print only the occupation column ``` users['occupation'] users.occupation ``` ### Step 12. How many different occupations are in this dataset? ``` #Show all the different occupations: users.occupation.value_counts() #show the number of different occupations: users.occupation.nunique() #OR, model answer: users.occupation.value_counts().count() ``` ### Step 13. What is the most frequent occupation? ``` users.occupation.value_counts() # student = 196 #Because "most" is asked #.head(1) = first one #.index[0] = information of first column* users.occupation.value_counts().head(1).index[0] ``` ### Step 14. Summarize the DataFrame. ``` users.describe() #Notice: by default, only the numeric columns are returned. ``` ### Step 15. Summarize all the columns ``` users.describe(include = "all") #Notice: By default, only the numeric columns are returned. ``` ### Step 16. Summarize only the occupation column ``` users.occupation.describe() ``` ### Step 17. What is the mean age of users? ``` #round(users.age.mean()) users.age.describe() ``` ### Step 18. What is the age with least occurrence? ``` users.age.value_counts().tail() #7, 10, 11, 66 and 73 years -> only 1 occurrence ```
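A few alternative idioms for the steps above, using the same `users` frame; they give the same answers and are shown only for comparison.

```
# Step 13: most frequent occupation without slicing value_counts()
users.occupation.value_counts().idxmax()   # or: users.occupation.mode()[0]

# Step 17: mean age directly
round(users.age.mean(), 2)

# Step 18: the ages with the fewest occurrences
users.age.value_counts().nsmallest(5)
```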
# Validation of EnzymeML documents

EnzymeML is considered a container for data and does not perform any validation aside from data type checks. Hence, a user is free to insert whatever is necessary for the application without any restrictions. However, once data is published to databases, compliance needs to be guaranteed. For this, PyEnzyme allows EnzymeML documents to be validated against a database standard before upload. Databases can host a specific YAML file, generated from a spreadsheet, which in turn is used to check compliance. In addition, if the document is non-compliant, a report states where and why the document failed validation.

The YAML validation file mirrors the complete EnzymeML data model and supports checks on the following attributes:

- __Mandatory__: Whether or not this field is required.
- __Value ranges__: An interval within which certain values should lie.
- __Controlled vocabularies__: For fields where only certain values are allowed. Intended for textual fields.

The following example will demonstrate how to generate an EnzymeML Validation Spreadsheet and convert it to a YAML file. Finally, an example `EnzymeMLDocument` will be loaded and validated against the given YAML file. For the sake of demonstration, validation will intentionally fail so that an example report can be shown.

```
import pyenzyme as pe
```

### Generation and conversion of a validation spreadsheet

The `EnzymeMLValidator` class has methods to generate and convert an EnzymeML validation spreadsheet. It should be noted that the generated spreadsheet always reflects the current state of the data model and is not maintained manually. The `EnzymeMLDocument` class definition is recursively inferred to generate the file. This way, once the data model is extended, the spreadsheet will be updated too.

```
from pyenzyme.enzymeml.tools import EnzymeMLValidator

# Generation of a validation spreadsheet
EnzymeMLValidator.generateValidationSpreadsheet(".")

# ... for those who like to go directly to YAML
yaml_string = EnzymeMLValidator.generateValidationYAML(".")
```

### Using an example spreadsheet

Since the blank validation YAML won't demonstrate all types of checks, we are going to use an example that has been provided in this directory and convert it to YAML.

```
# Convert an example spreadsheet to YAML
yaml_string = EnzymeMLValidator.convertSheetToYAML(
    path="EnzymeML_Validation_Template_Example.xlsx",
    filename="EnzymeML_Validation_Template_Example"
)
```

### Performing validation

Once the YAML file is ready, validation can be done for an example `EnzymeMLDocument` found in this directory. The validation for this example will fail by intention and thus return a report that is shown here. Such a report is returned as a `Dict` and can be inspected either manually or programmatically. This was done to allow automation workflows to utilize validation.

```
# Load an example document
enzmldoc = pe.EnzymeMLDocument.fromFile("Model_4.omex")

# Perform validation against the previously generated YAML
report, is_valid = enzmldoc.validate(yaml_path="EnzymeML_Validation_Template_Example.yaml")

print(f">> Document is valid: {is_valid}")

# Let's inspect the report
import json
print(json.dumps(report, indent=4))
```
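To act on a failed validation in an automated workflow, the report can also be flattened into a plain list of offending locations. The helper below is only a sketch: the exact nesting of the report depends on the validation YAML, so it simply walks arbitrary dicts and lists and prints each leaf under a dotted path.

```
# Hypothetical helper: assumes only that `report` (from the cell above) is a
# nested structure of dicts/lists with human-readable messages at the leaves.
def iter_report(node, path=""):
    if isinstance(node, dict):
        for key, value in node.items():
            yield from iter_report(value, f"{path}.{key}" if path else str(key))
    elif isinstance(node, list):
        for i, value in enumerate(node):
            yield from iter_report(value, f"{path}[{i}]")
    else:
        yield path, node

for location, message in iter_report(report):
    print(f"{location}: {message}")
```

-----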
## Hook callbacks This provides both a standalone class and a callback for registering and automatically deregistering [PyTorch hooks](https://pytorch.org/tutorials/beginner/former_torchies/nn_tutorial.html#forward-and-backward-function-hooks), along with some pre-defined hooks. Hooks can be attached to any [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module), for either the forward or the backward pass. We'll start by looking at the pre-defined hook [`ActivationStats`](/callbacks.hooks.html#ActivationStats), then we'll see how to create our own. ``` from fastai.gen_doc.nbdoc import * from fastai.callbacks.hooks import * from fastai.train import * from fastai.vision import * show_doc(ActivationStats) ``` [`ActivationStats`](/callbacks.hooks.html#ActivationStats) saves the layer activations in `self.stats` for all `modules` passed to it. By default it will save activations for *all* modules. For instance: ``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) #learn = cnn_learner(data, models.resnet18, callback_fns=ActivationStats) learn = Learner(data, simple_cnn((3,16,16,2)), callback_fns=ActivationStats) learn.fit(1) ``` The saved `stats` is a `FloatTensor` of shape `(2,num_modules,num_batches)`. The first axis is `(mean,stdev)`. ``` len(learn.data.train_dl),len(learn.activation_stats.modules) learn.activation_stats.stats.shape ``` So this shows the standard deviation (`axis0==1`) of 2th last layer (`axis1==-2`) for each batch (`axis2`): ``` plt.plot(learn.activation_stats.stats[1][-2].numpy()); ``` ### Internal implementation ``` show_doc(ActivationStats.hook) ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(ActivationStats.on_train_begin) show_doc(ActivationStats.on_batch_end) show_doc(ActivationStats.on_train_end) show_doc(Hook) ``` Registers and manually deregisters a [PyTorch hook](https://pytorch.org/tutorials/beginner/former_torchies/nn_tutorial.html#forward-and-backward-function-hooks). Your `hook_func` will be called automatically when forward/backward (depending on `is_forward`) for your module `m` is run, and the result of that function is placed in `self.stored`. ``` show_doc(Hook.remove) ``` Deregister the hook, if not called already. ``` show_doc(Hooks) ``` Acts as a `Collection` (i.e. `len(hooks)` and `hooks[i]`) and an `Iterator` (i.e. `for hook in hooks`) of a group of hooks, one for each module in `ms`, with the ability to remove all as a group. Use `stored` to get all hook results. `hook_func` and `is_forward` behavior is the same as [`Hook`](/callbacks.hooks.html#Hook). See the source code for [`HookCallback`](/callbacks.hooks.html#HookCallback) for a simple example. ``` show_doc(Hooks.remove) ``` Deregister all hooks created by this class, if not previously called. ## Convenience functions for hooks ``` show_doc(hook_output) ``` Function that creates a [`Hook`](/callbacks.hooks.html#Hook) for `module` that simply stores the output of the layer. ``` show_doc(hook_outputs) ``` Function that creates a [`Hook`](/callbacks.hooks.html#Hook) for all passed `modules` that simply stores the output of the layers. 
For example, the (slightly simplified) source code of [`model_sizes`](/callbacks.hooks.html#model_sizes) is: ```python def model_sizes(m, size): x = m(torch.zeros(1, in_channels(m), *size)) return [o.stored.shape for o in hook_outputs(m)] ``` ``` show_doc(model_sizes) show_doc(model_summary) ``` This method only works on a [`Learner`](/basic_train.html#Learner) object with `train_ds` in it. If it was created as a result of [`load_learner`](/basic_train.html#load_learner), there is no [`data`](/vision.data.html#vision.data) to run through the model and therefore it's not possible to create such summary. A sample `summary` looks like: ``` ====================================================================== Layer (type) Output Shape Param # Trainable ====================================================================== Conv2d [64, 176, 176] 9,408 False ______________________________________________________________________ BatchNorm2d [64, 176, 176] 128 True ______________________________________________________________________ ReLU [64, 176, 176] 0 False ______________________________________________________________________ MaxPool2d [64, 88, 88] 0 False ______________________________________________________________________ Conv2d [64, 88, 88] 36,864 False ... ``` Column definition: 1. **Layer (type)** is the name of the corresponding [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module). 2. **Output Shape** is the shape of the output of the corresponding layer (minus the batch dimension, which is always the same and has no impact on the model params). 3. **Param #** is the number of weights (and optionally bias), and it will vary for each layer. The number of params is calculated differently for each layer type. Here is how it's calculated for some of the most common layer types: * Conv: `kernel_size*kernel_size*ch_in*ch_out` * Linear: `(n_in+bias) * n_out` * Batchnorm: `2 * n_out` * Embeddings: `n_embed * emb_sz` 4. **Trainable** indicates whether a layer is trainable or not. * Layers with `0` parameters are always Untrainable (e.g., `ReLU` and `MaxPool2d`). * Other layers are either Trainable or not, usually depending on whether they are frozen or not. See [Discriminative layer training](https://docs.fast.ai/basic_train.html#Discriminative-layer-training). To better understand this summary it helps to also execute `learn.model` and correlate the two outputs. Example: Let's feed to a [`Learner`](/basic_train.html#Learner) a dataset of 3-channel images size 352x352 and look at the model and its summary: ``` data.train_ds[0][0].data.shape learn = cnn_learner(data, models.resnet34, ...) print(learn.model) print(learn.summary()) ``` Here are the outputs with everything but the relevant to the example lines removed: ``` torch.Size([3, 352, 352]) [...] (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) [...] (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) [...] 
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (8): Linear(in_features=512, out_features=37, bias=True) ====================================================================== Layer (type) Output Shape Param # Trainable ====================================================================== Conv2d [64, 176, 176] 9,408 False ______________________________________________________________________ BatchNorm2d [64, 176, 176] 128 True ______________________________________________________________________ [...] MaxPool2d [64, 88, 88] 0 False ______________________________________________________________________ Conv2d [64, 88, 88] 36,864 False [...] ______________________________________________________________________ Linear [37] 18,981 True ``` **So let's calculate some params:** For the `Conv2d` layers, multiply the first 4 numbers from the corresponding layer definition: ``` Conv2d(3, 64, kernel_size=(7, 7), ...) 3*64*7*7 = 9,408 Conv2d(64, 64, kernel_size=(3, 3), ...) 64*64*3*3 = 36,864 ``` For the `BatchNorm2d` layer, multiply the first number by 2: ``` BatchNorm2d(64, ...) 64*2 = 128 ``` For `Linear` we multiply the first 2 and include the bias if it's `True`: ``` Linear(in_features=512, out_features=37, bias=True) (512+1)*37 = 18,981 ``` **Now let's calculate some output shapes:** We started with 3x352x352 image and run it through this layer: `Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)` How did we get: `[64, 176, 176]` The number of output channels is `64`, that's the first dimension in the number above. And then our image of `352x352` got convolved into `176x176` because of stride `2x2` (`352/2`). Then we had: `MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)` which reduced `[64, 176, 176]` to `[64, 88, 88]` again because of stride 2. And so on, finishing with: `Linear(in_features=512, out_features=37, bias=True)` which reduced everything to just `[37]`. ``` jekyll_warn("Known issue: `model_summary` and `Learner.summary` don't work with the AWD LSTM in text models.") show_doc(num_features_model) ``` It can be useful to get the size of each layer of a model (e.g. for printing a summary, or for generating cross-connections for a [`DynamicUnet`](/vision.models.unet.html#DynamicUnet)), however they depend on the size of the input. This function calculates the layer sizes by passing in a minimal tensor of `size`. ``` show_doc(dummy_batch) show_doc(dummy_eval) show_doc(HookCallback) ``` For all `modules`, uses a callback to automatically register a method `self.hook` (that you must define in an inherited class) as a hook. This method must have the signature: ```python def hook(self, m:Model, input:Tensors, output:Tensors) ``` If `do_remove` then the hook is automatically deregistered at the end of training. See [`ActivationStats`](/callbacks.hooks.html#ActivationStats) for a simple example of inheriting from this class. ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(HookCallback.on_train_begin) show_doc(HookCallback.on_train_end) ``` ## Undocumented Methods - Methods moved below this line will intentionally be hidden ``` show_doc(HookCallback.remove) show_doc(Hook.hook_fn) ``` ## New Methods - Please document or move to the undocumented section
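As a quick cross-check of the parameter formulas worked through above, here is a small sketch (my addition, not part of the fastai docs) that compares them with the counts PyTorch itself reports:

```
# Verify the Conv / BatchNorm / Linear parameter formulas against PyTorch.
import torch.nn as nn

def n_params(m): return sum(p.numel() for p in m.parameters())

print(n_params(nn.Conv2d(3, 64, kernel_size=7, bias=False)))   # 9408  = 3*64*7*7
print(n_params(nn.Conv2d(64, 64, kernel_size=3, bias=False)))  # 36864 = 64*64*3*3
print(n_params(nn.BatchNorm2d(64)))                            # 128   = 2*64
print(n_params(nn.Linear(512, 37, bias=True)))                 # 18981 = (512+1)*37
```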
github_jupyter
0.827306
0.955527
# Introduction to TensorFlow Data Validation

## Learning Objectives

1. Review TFDV methods
2. Generate statistics
3. Visualize statistics
4. Infer a schema
5. Update a schema

## Introduction

This lab is an introduction to TensorFlow Data Validation (TFDV), a key component of TensorFlow Extended. This lab serves as a foundation for understanding the features of TFDV and how it can help you understand, validate, and monitor your data.

TFDV can be used for generating schemas and statistics about the distribution of every feature in the dataset. Such information is useful for comparing multiple datasets (e.g. training vs inference datasets) and reporting statistical differences in their feature distributions. TFDV also offers visualization capabilities for comparing datasets based on the Google PAIR Facets project.

Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/production_ml/labs/tfdv_basic_spending.ipynb) -- try to complete that notebook first before reviewing this solution notebook.

### Import Libraries

```
!pip install pyarrow==5.0.0
!pip install numpy==1.19.2
!pip install tensorflow-data-validation
```

**Restart the kernel (Kernel > Restart kernel > Restart).**

**Re-run the above cell and proceed further.**

**Note: Please ignore any incompatibility warnings and errors.**

```
import pandas as pd
import tensorflow_data_validation as tfdv
import sys
import warnings
warnings.filterwarnings('ignore')
print('Installing TensorFlow Data Validation')
!pip install -q tensorflow_data_validation[visualization]
print('TFDV version: {}'.format(tfdv.version.__version__))

# Confirm that we're using Python 3
assert sys.version_info.major == 3, 'Oops, not running Python 3. Use Runtime > Change runtime type'
```

### Load the Consumer Spending Dataset

We will download our dataset from Google Cloud Storage. The columns in the dataset are:

* 'Graduated': Whether or not the person is a college graduate
* 'Work Experience': The number of years in the workforce
* 'Family Size': The size of the family unit
* 'Spending Score': The spending score for consumer spending

```
# TODO
score_train = pd.read_csv('data/score_train.csv')
score_train.head()

# TODO
score_test = pd.read_csv('data/score_test.csv')
score_test.head()

score_train.info()
```

#### Review the methods present in TFDV

```
# check methods present in tfdv
# TODO
[methods for methods in dir(tfdv)]
```

### Describing data with TFDV

The usual workflow when using TFDV during training is as follows:

1. Generate statistics for the data
2. Use those statistics to generate a schema for each feature
3. Visualize the schema and statistics and manually inspect them
4. Update the schema if needed

### Compute and visualize statistics

First we'll use [`tfdv.generate_statistics_from_dataframe`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe) to compute statistics for our training data. (ignore the snappy warnings)

TFDV can compute descriptive [statistics](https://github.com/tensorflow/metadata/blob/v0.6.0/tensorflow_metadata/proto/v0/statistics.proto) that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions. Internally, TFDV uses [Apache Beam](https://beam.apache.org/)'s data-parallel processing framework to scale the computation of statistics over large datasets.
For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.

**NOTE: Compute statistics**

* [tfdv.generate_statistics_from_csv](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv)
* [tfdv.generate_statistics_from_dataframe](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe)
* [tfdv.generate_statistics_from_tfrecord](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_tfrecord)

#### Generate Statistics from a Pandas DataFrame

```
# Compute data statistics for the input pandas DataFrame.
# TODO
stats = tfdv.generate_statistics_from_dataframe(dataframe=score_train)
```

Now let's use [`tfdv.visualize_statistics`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics), which uses [Facets](https://pair-code.github.io/facets/) to create a succinct visualization of our training data:

* Notice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.
* Notice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.
* A feature for which no examples have values is an opportunity for dimensionality reduction - it can usually be dropped.
* Try clicking "expand" above the charts to change the display
* Try hovering over bars in the charts to display bucket ranges and counts
* Try switching between the log and linear scales, and notice how the log scale reveals much more detail about categorical features such as `Profession`
* Try selecting "quantiles" from the "Chart to show" menu, and hover over the markers to show the quantile percentages

```
# Visualize the input statistics using Facets.
# TODO
tfdv.visualize_statistics(stats)
```

#### TFDV generates different types of statistics based on the type of features.

**For numerical features, TFDV computes for every feature:**

* Count of records
* Number of missing (i.e. null) values
* Histogram of values
* Mean and standard deviation
* Minimum and maximum values
* Percentage of zero values

**For categorical features, TFDV provides:**

* Count of values
* Percentage of missing values
* Number of unique values
* Average string length
* Count for each label and its rank

### Let's compare the score_train and the score_test datasets

```
train_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_train)
test_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_test)

tfdv.visualize_statistics(
    lhs_statistics=train_stats, lhs_name='TRAIN_DATASET',
    rhs_statistics=test_stats, rhs_name='NEW_DATASET')
```

### Infer a schema

Now let's use [`tfdv.infer_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema) to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values.
Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics. Getting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct.

#### Generating Schema

Once statistics are generated, the next step is to generate a schema for our dataset. The schema maps each feature in the dataset to a type (float, bytes, etc.) and also defines feature boundaries (min, max, distribution of values, missing values, etc.).

Link to infer_schema: https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema

With TFDV, we generate a schema from the statistics using:

```
# Infers schema from the input statistics.
# TODO
schema = tfdv.infer_schema(statistics=stats)
print(schema)
```

The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use [`tfdv.display_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/display_schema) to display the inferred schema so that we can review it.

```
tfdv.display_schema(schema=schema)
```

#### TFDV provides an API to print a summary of each feature's schema

In this visualization, the columns stand for:

**Type** indicates the feature datatype.
**Presence** indicates whether the feature must be present in 100% of examples (required) or not (optional).
**Valency** indicates the number of values required per training example.
**Domain and Values** indicates the feature domain and its values.

In the case of categorical features, single indicates that each training example must have exactly one category for the feature.

### Updating the Schema

As stated above, **Presence** indicates whether the feature must be present in 100% of examples (required) or not (optional). Currently, all of our features except for our target label are shown as "optional". We need to make all of our features required except for "Work Experience", so we will need to update the schema.

TFDV lets you update the schema according to your domain knowledge of the data if you are not satisfied by the auto-generated schema. We will cover three use cases: making a feature required, adding a value to a feature, and changing a feature from a float to an integer.

#### Change optional features to required.

```
# Make Graduated, Profession and Family_Size required (present in 100% of examples)
Graduated_feature = tfdv.get_feature(schema, 'Graduated')
Graduated_feature.presence.min_fraction = 1.0

Profession_feature = tfdv.get_feature(schema, 'Profession')
Profession_feature.presence.min_fraction = 1.0

Family_Size_feature = tfdv.get_feature(schema, 'Family_Size')
Family_Size_feature.presence.min_fraction = 1.0

tfdv.display_schema(schema)
```

#### Update a feature with a new value

Let's add "Self-Employed" to the Profession feature

```
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.insert(0, 'Self-Employed')
Profession_domain.value
# [0 indicates I want 'Self-Employed' to come first; if the number were 3,
# it would be placed after the third value.]
```

#### Let's remove "Homemaker" from "Profession"

```
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.remove('Homemaker')
Profession_domain.value
```

#### Change a feature from a float to an integer

```
# Update Family_Size to INT (type 2 in the schema proto)
size = tfdv.get_feature(schema, 'Family_Size')
size.type = 2
tfdv.display_schema(schema)
```

In the next lab, you compare two datasets and check for anomalies.

## When to use TFDV

It's easy to think of TFDV as only applying to the start of your training pipeline, as we did here, but in fact it has many uses. Here are a few more:

* Validating new data for inference to make sure that we haven't suddenly started receiving bad features
* Validating new data for inference to make sure that our model has trained on that part of the decision surface
* Validating our data after we've transformed it and done feature engineering (probably using [TensorFlow Transform](https://www.tensorflow.org/tfx/guide/transform)) to make sure we haven't done something wrong

https://github.com/GoogleCloudPlatform/mlops-on-gcp/blob/master/examples/tfdv-structured-data/tfdv-covertype.ipynb
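As a small preview of that anomaly check, here is a minimal sketch. It is my addition rather than part of the lab, and it assumes the `schema` and `test_stats` objects created above:

```
# Check the new dataset against the curated schema and surface any anomalies.
anomalies = tfdv.validate_statistics(statistics=test_stats, schema=schema)
tfdv.display_anomalies(anomalies)
```

Any feature in the new data that violates the curated schema (for example, a `Profession` value outside the domain) will show up in the anomalies report.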
github_jupyter
0.282493
0.992575
<a href="https://colab.research.google.com/github/sudar-coder321/Height_Weight_For_Gender_Data_Analysis/blob/main/Weight_Height_py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

```
import pandas as pd

df = pd.read_csv("weight-height.csv")
df.dtypes
df.info()

# Treat Gender as a categorical variable
df['Gender'].astype('category')

# Encode Gender numerically in a separate column so that the cells below,
# which filter on the original 'Male'/'Female' labels, keep working
gender = {'Male': 1, 'Female': 0}
df['Gender_num'] = [gender[item] for item in df.Gender]
print(df)

import matplotlib.pyplot as plt
plt.style.use('seaborn')

df.Height.plot(kind='hist', color='green', edgecolor='darkgreen', figsize=(10,7))
plt.title('Distribution of Height', size=25)
plt.xlabel('Height (Inches)', size=20)
plt.ylabel('Frequency', size=20)

df.Weight.plot(kind='hist', color='purple', edgecolor='black', figsize=(12,7))
plt.title('Distribution of Weight', size=25)
plt.xlabel('Weight (Pounds)', size=20)
plt.ylabel('Frequency', size=20)

# Descriptive statistics male
statistics_male = df[df['Gender'] == 'Male'].describe()
statistics_male.rename(columns=lambda x: x + '_male', inplace=True)

# Descriptive statistics female
statistics_female = df[df['Gender'] == 'Female'].describe()
statistics_female.rename(columns=lambda x: x + '_female', inplace=True)

# Dataframe that contains statistics for both male and female
statistics = pd.concat([statistics_male, statistics_female], axis=1)
statistics

ax1 = df[df['Gender'] == 'Male'].plot(kind='scatter', x='Height', y='Weight', color='blue', alpha=0.5, figsize=(10, 7))
df[df['Gender'] == 'Female'].plot(kind='scatter', x='Height', y='Weight', color='magenta', alpha=0.5, figsize=(10, 7), ax=ax1)
plt.legend(labels=['Males', 'Females'])
plt.title('Relationship between Height and Weight', size=24)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

# Scatter plot of 500 females
sample_females = df[df['Gender'] == 'Female'].sample(500)
sample_females.plot(kind='scatter', x='Height', y='Weight', color='magenta', alpha=0.5, figsize=(10, 7))
plt.legend(labels=['Females'])
plt.title('Relationship between Height and Weight (sample of 500 females)', size=20)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

import numpy as np

# best fit polynomials
df_males = df[df['Gender'] == 'Male']
df_females = df[df['Gender'] == 'Female']

# polynomial - males
male_fit = np.polyfit(df_males.Height, df_males.Weight, 1)
# array([ 5.96177381, -224.49884071])

# polynomial - females
female_fit = np.polyfit(df_females.Height, df_females.Weight, 1)
# array([ 5.99404661, -246.01326575])

# scatter plots and regression lines
# males and females dataframes
df_males = df[df['Gender'] == 'Male']
df_females = df[df['Gender'] == 'Female']

# Scatter plots.
ax1 = df_males.plot(kind='scatter', x='Height', y='Weight', color='blue', alpha=0.5, figsize=(10, 7))
df_females.plot(kind='scatter', x='Height', y='Weight', color='magenta', alpha=0.5, figsize=(10, 7), ax=ax1)

# regression lines
plt.plot(df_males.Height, male_fit[0] * df_males.Height + male_fit[1], color='darkblue', linewidth=2)
plt.plot(df_females.Height, female_fit[0] * df_females.Height + female_fit[1], color='deeppink', linewidth=2)

# regression equations
plt.text(65, 230, 'y={:.2f}+{:.2f}*x'.format(male_fit[1], male_fit[0]), color='darkblue', size=12)
plt.text(70, 130, 'y={:.2f}+{:.2f}*x'.format(female_fit[1], female_fit[0]), color='deeppink', size=12)

# legend, title and labels.
plt.legend(labels=['Males Regression Line', 'Females Regression Line', 'Males', 'Females'])
plt.title('Relationship between Height and Weight', size=24)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

import seaborn as sns

# regression plot using seaborn
fig = plt.figure(figsize=(10, 7))
sns.regplot(x=df_males.Height, y=df_males.Weight, color='blue', marker='+')
sns.regplot(x=df_females.Height, y=df_females.Weight, color='magenta', marker='+')

# Legend, title and labels.
plt.legend(labels=['Males', 'Females'])
plt.title('Relationship between Height and Weight', size=24)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

import seaborn as sns

# 200 random samples of each gender
df_males_sample = df[df['Gender'] == 'Male'].sample(200)
df_females_sample = df[df['Gender'] == 'Female'].sample(200)

# regression plot using seaborn
fig = plt.figure(figsize=(20, 14))
sns.regplot(x=df_males_sample.Height, y=df_males_sample.Weight, color='blue', marker='+')
sns.regplot(x=df_females_sample.Height, y=df_females_sample.Weight, color='magenta', marker='+')

# legend, title, and labels.
plt.legend(labels=['Males', 'Females'])
plt.title('Relationship between Height and Weight', size=24)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

from sklearn.linear_model import LinearRegression

df_males = df[df['Gender'] == 'Male']

# create linear regression object
lr_males = LinearRegression()

# fit linear regression
lr_males.fit(df_males[['Height']], df_males['Weight'])

# get the slope and intercept of the line of best fit
print(lr_males.intercept_)
# -224.49884070545772
print(lr_males.coef_)
# 5.96177381

df_females = df[df['Gender'] == 'Female']

# create linear regression object
lr_females = LinearRegression()

# fit linear regression
lr_females.fit(df_females[['Height']], df_females['Weight'])

# get the slope and intercept of the line of best fit
print(lr_females.intercept_)
# -246.01326574667277
print(lr_females.coef_)
# 5.99404661

df_females = df[df['Gender'] == 'Female']

# fit the model using numpy
female_fit = np.polyfit(df_females.Height, df_females.Weight, 1)

# predictions using numpy
print(np.polyval(female_fit, [60]))
# [113.62953114]

# fit the model using scikit-learn
lr_females = LinearRegression()
lr_females.fit(df_females[['Height']], df_females['Weight'])

# predictions using scikit-learn
print(lr_females.predict([[60]]))
# [113.62953114]

# dataframe containing only females
df_females = df[df['Gender'] == 'Female']

# correlation coefficients
df_females.corr()

df_males = df[df['Gender'] == 'Male']
df_males.corr()

# method using scipy.stats
from scipy import stats

# dataframe containing only females
df_females = df[df['Gender'] == 'Female']

# pearson correlation coefficient and p-value
pearson_coef, p_value = stats.pearsonr(df_females.Height, df_females.Weight)
print(pearson_coef)
# 0.849608591418601

# dataframe containing only males
df_males = df[df['Gender'] == 'Male']

# pearson correlation coefficient and p-value
pearson_coef, p_value = stats.pearsonr(df_males.Height, df_males.Weight)
print(pearson_coef)
# 0.8629788486163176

import seaborn as sns

# dataframe containing a sample of 500 females
df_females = df[df['Gender'] == 'Female'].sample(500)

# residual plot 500 females
fig = plt.figure(figsize = (10, 7))
sns.residplot(df_females.Height, df_females.Weight, color='magenta')

# title and labels
plt.title('Residual plot 500 females', size=24)
plt.xlabel('Height (inches)', size=18)
plt.ylabel('Weight (pounds)', size=18);

df_males = df[df['Gender'] == 'Male'].sample(500)
# residual plot 500 males
fig = plt.figure(figsize=(10, 7))
sns.residplot(df_males.Height, df_males.Weight, color='blue')
plt.title("Residual Plot of 500 Males")
plt.xlabel("Height (Males)")
plt.ylabel("Weight (Males)")

# one-hot encode Gender (creates the Gender_Female and Gender_Male columns)
df_dummy = pd.get_dummies(df, columns=['Gender'])
# drop female column
df_dummy.drop('Gender_Female', axis=1, inplace=True)
# rename Gender_Male column
df_dummy.rename(columns={'Gender_Male': 'Gender'}, inplace=True)
# df_dummy dataframe first 5 rows
df_dummy.head()
```
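With the dummy-encoded dataframe in place, both `Height` and the `Gender` dummy can be used in a single regression. This sketch is my addition and assumes `df_dummy` from the cell above:

```
# Fit one linear model on Height plus the 0/1 Gender dummy.
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(df_dummy[['Height', 'Gender']], df_dummy['Weight'])
print(lr.intercept_, lr.coef_)  # intercept, [Height slope, Gender effect]
```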
github_jupyter
0.567697
0.919281
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Visualization/image_stretch.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Visualization/image_stretch.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Visualization/image_stretch.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Visualization/image_stretch.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide output from a specific cell. ``` # %%capture # !pip install earthengine-api # !pip install geehydro ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for this first time or if you are getting an authentication error. ``` # ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` # Load a Landsat 8 raw image. image = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318') # Define a RasterSymbolizer element with '_enhance_' for a placeholder. template_sld = \ '<RasterSymbolizer>' + \ '<ContrastEnhancement><_enhance_/></ContrastEnhancement>' + \ '<ChannelSelection>' + \ '<RedChannel>' + \ '<SourceChannelName>B5</SourceChannelName>' + \ '</RedChannel>' + \ '<GreenChannel>' + \ '<SourceChannelName>B4</SourceChannelName>' + \ '</GreenChannel>' + \ '<BlueChannel>' + \ '<SourceChannelName>B3</SourceChannelName>' + \ '</BlueChannel>' + \ '</ChannelSelection>' + \ '</RasterSymbolizer>' # Get SLDs with different enhancements. equalize_sld = template_sld.replace('_enhance_', 'Histogram') normalize_sld = template_sld.replace('_enhance_', 'Normalize') # Display the results. 
Map.centerObject(image, 10) Map.addLayer(image, {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 15000}, 'Linear') Map.addLayer(image.sldStyle(equalize_sld), {}, 'Equalized') Map.addLayer(image.sldStyle(normalize_sld), {}, 'Normalized') ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
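A gamma stretch can be compared alongside the SLD-based enhancements. The sketch below is my addition; it uses the standard `gamma` visualization parameter and would be added before the final cell that renders the map:

```
# Add a simple gamma stretch as an extra layer for comparison.
Map.addLayer(image,
             {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 15000, 'gamma': 1.5},
             'Gamma 1.5')
```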
github_jupyter
0.710427
0.955068
## Project 4 Hack-a-thon

### Max Bermont, Yunus Herman, Grant Hicks

### Problem Statement:

We have been tasked with creating a model that will predict if a person's income is greater than \\$50,000 given certain profile information. The information that we were given to create our model is an extraction of data from the 1994 Census database. Since the client who hired us to create the model wants things to be as 'cost-efficient' as possible, they only provided us with a limited amount of data to create our model. We want to know whether we can predict that someone is likely to have an income of greater than \\$50,000 using the limited amount of data that we have at our disposal.

```
#Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.svm import SVC
from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report, f1_score
from sklearn.neighbors import KNeighborsClassifier
```

### Reading in the Data

```
# Reading in the dataframe as df
df = pd.read_csv('./data/cheap_train_sample.csv')

#read in test_data.csv
test = pd.read_csv('./data/test_data.csv')

# Checking some basic info, determining data types
df.info()
```

We have 6,513 rows with 14 columns. So far it looks like there are no null values, but we will have to map some values to numerical values.

```
# Looking at description of numerical data
df.describe()

#Changes representation of Unknown values.
df.replace(to_replace= ' ?', value='other', inplace=True)

plt.figure(figsize = (10,8))
plt.hist(df['age']);
#sns.catplot(x ='age', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Age',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 0, fontsize = 12)
plt.yticks(rotation = 0, fontsize = 12);

# Checking correlation heatmap for any standout correlations
sns.heatmap(df.corr(), cmap='coolwarm')

# clean object features to be numeric.
# a lot of features for a limited amount of unique data points.
df

plt.figure(figsize = (10,8))
#plt.hist(df['workclass']);
sns.catplot(x ='workclass', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Workclass',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 12)
plt.yticks(rotation = 0, fontsize = 12);

plt.figure(figsize = (10,8))
plt.hist(df['fnlwgt']);
#sns.catplot(x ='fnlwgt', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Final Weight',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 12)
plt.yticks(rotation = 0, fontsize = 12);

df_ed = df.sort_values(by = 'education')
plt.figure(figsize = (20,8))
#plt.hist(df['education']);
sns.catplot(x ='education', hue ='sex', kind ='count', data = df_ed, height = 12)
plt.title('The distribution of Education',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 12);

plt.figure(figsize = (10,8))
#plt.hist(df['marital-status'])
sns.catplot(x ='marital-status', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Marital Status',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 12);

plt.figure(figsize = (10,8))
#plt.hist(df['occupation'])
sns.catplot(x ='occupation', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Occupation',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 10);

plt.figure(figsize = (10,8))
#plt.hist(df['relationship'])
sns.catplot(x ='relationship', hue ='sex', kind ='count', data = df, height = 8)
plt.title('The distribution of Relationship',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 10);

plt.figure(figsize = (10,8))
plt.hist(df['capital-gain'], bins = 20)
#sns.catplot(x ='capital-gain', hue ='sex', kind ='count', data = df, height = 8, bins = 20)
plt.title('The distribution of Capital Gain',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 10);

plt.figure(figsize = (10,8))
plt.hist(df['capital-loss'], bins = 20)
#sns.catplot(x ='capital-loss', hue ='sex', kind ='count', data = df, height = 8, bins = 20)
plt.title('The distribution of Capital Loss',fontsize = 16,fontweight='bold')
plt.xticks(rotation = 30, fontsize = 10)
plt.yticks(rotation = 0, fontsize = 10);
```

## Preparing the Data

```
df = pd.get_dummies(df, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'], drop_first=True)
test = pd.get_dummies(test, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'], drop_first=True)

print(f'Number of Columns in Training data: {len(df.columns)}')
print(f'Number of Columns in Testing data: {len(test.columns)}')
# note: train and test now have the same number of columns

df.wage.value_counts()
# incomes at or below 50K will be encoded as 0, incomes above 50K as 1
# Creating our y-column
df['wage'] = [0 if wage == ' <=50K' else 1 for wage in df.wage]
df.wage.value_counts()

# Splitting the data into our train and validation sets; we drop the 'education' column
# because they also provided us with a numerical version of that column
X = df.drop(columns=['wage', 'education'])
features = X.columns
y = df['wage']
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=420)

# Baseline: share of people earning more than 50K
# (predicting the majority class would score 1 minus this)
guess = [1 if pred == 1 else 0 for pred in y]
sum(guess)/len(guess)
```

## Checking Models

#### Random Forest

```
rfc = RandomForestClassifier()
cross_val_score(rfc, X, y, cv=5).mean()
```

#### SVC

```
svc = SVC()
cross_val_score(svc, X, y, cv=5).mean()
```

#### AdaBoost

```
boost = AdaBoostClassifier()
cross_val_score(boost, X, y, cv=5).mean()
```

#### Logistic Regression

```
logr = LogisticRegression()
cross_val_score(logr, X, y, cv=5).mean()
```

#### Decision Tree

```
tree = DecisionTreeClassifier()
cross_val_score(tree, X, y, cv=5).mean()
```

#### Extra Trees

```
x_trees = ExtraTreeClassifier()
cross_val_score(x_trees, X, y, cv=5).mean()
```

#### K-Nearest Neighbors

```
knn = KNeighborsClassifier()
cross_val_score(knn, X, y, cv=5).mean()
```

| Model | Cross Val Score |
|:-------------------:|:---------------:|
| Random Forest | 0.8477 |
| SVC | 0.7683 |
| AdaBoost | 0.8577 |
| Logistic Regression | 0.7926 |
| Decision Tree | 0.8036 |
| Extra Trees | 0.7887 |
| K-Nearest Neighbors | 0.7643 |

With these scores we decided to move forward with an AdaBoost model. We found it to have the best score while being the least overfit of the models that we ran.

### Setting up models with a pipeline

We did some testing setting up a pipeline for our models to confirm that there was not something to tweak in another model that would get us a better score than the AdaBoost.

```
pipe = Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())])
pipe.fit(X_train, y_train)
print(f'Training Score: {pipe.score(X_train, y_train)}')
print(f'Validation Score: {pipe.score(X_val, y_val)}')

predictions = pipe.predict(X_val)
print(confusion_matrix(y_val, predictions))
print(classification_report(y_val, predictions))

pipe = Pipeline([('scaler', StandardScaler()), ('boost', AdaBoostClassifier())])
pipe.fit(X_train, y_train)
print(f'Training Score: {pipe.score(X_train, y_train)}')
print(f'Validation Score: {pipe.score(X_val, y_val)}')

predictions = pipe.predict(X_val)
print(confusion_matrix(y_val, predictions))
print(classification_report(y_val, predictions))
```

Our AdaBoost still leaves us with the best score and the fewest false negatives, which had been worrying us, as the number of false negatives always seemed to be large compared to the total number of negatives.

### Adjusting parameters with grid search

To further nail down how we can improve the model, and perhaps get rid of some of those false negatives, we looked to tune the parameters with a grid search.

```
boost = AdaBoostClassifier()
boost.fit(X_train, y_train)
values = boost.feature_importances_

feat = pd.DataFrame(features)
feat['values'] = values
df_feat = feat.sort_values(by= 'values', ascending=False).head(20)
df_feat.columns = ['features','values']
df_feat

plt.figure(figsize = (17,8))
plt.title('The distribution of feature importance',fontsize = 15)
sns.barplot(x = 'features', y = 'values', data = df_feat)
#df_feat.plot(kind='bar')
plt.xticks(rotation = 30, fontsize = 9)
plt.yticks(rotation = 0, fontsize = 10);

#weight features and adjust hyperparameters.
#what if for feature with an importance below .1 we multiply the
estimators = np.random.randint(10, 300, 25)
params = dict(n_estimators= estimators)

grid = GridSearchCV(AdaBoostClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1)
grid.fit(X_train, y_train)

print(f'Training Score: {grid.score(X_train, y_train)}')
print(f'Validation Score: {grid.score(X_val, y_val)}')
print(grid.best_params_)
print(grid.best_score_)

l_rate = [0.1291549665014884] #np.logspace(-2,-0.8,100)
estimators = [131] #np.random.randint(100,200,5)
criterion = ['mse'] #('friedman_mse', 'mse', 'mae')
max_features = [None] # [None, 'sqrt', 'log2']
params = dict(n_estimators= estimators, learning_rate= l_rate, criterion= criterion, max_features= max_features)

grid = GridSearchCV(GradientBoostingClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1)
grid.fit(X_train, y_train)

print(f'Training Score: {grid.score(X_train, y_train)}')
print(f'Validation Score: {grid.score(X_val, y_val)}')
print(grid.best_params_)
print(grid.best_score_)
```

The grid search provided us with the best parameters to use with our model.

```
best_params = grid.best_params_
# wrap each value in a list so the best parameters can be passed back into GridSearchCV
best_params = {key: [value] for key, value in best_params.items()}
```

### Making the Predictions

```
X_test = test.drop(columns=['education'])

test_grid = GridSearchCV(GradientBoostingClassifier(), param_grid= best_params, n_jobs=-1, cv=5, verbose= 1)
test_grid.fit(X, y)

pred = test_grid.predict(X_test)
submit = pd.DataFrame(pred, columns=['wage'])
submit.to_csv('submission.csv', index=False)
```

## Conclusion

Our final model performs much better than the baseline, with an accuracy of 87% on our testing data. We were able to put together a model with the data provided to us and tweak our features and parameters to create the best model that we could with the given data.
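As a follow-up sanity check on the false-negative concern discussed above, here is a short sketch (my addition, reusing `grid`, `X_val` and `y_val` from the cells above) that re-checks the tuned model on the validation split:

```
# Confusion matrix and F1 for the tuned model on the held-out validation split.
val_pred = grid.predict(X_val)
print(confusion_matrix(y_val, val_pred))
print(f1_score(y_val, val_pred))
```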
github_jupyter
#Imports import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score from sklearn.svm import SVC from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.metrics import confusion_matrix, classification_report, f1_score from sklearn.neighbors import KNeighborsClassifier # Reading in the dataframe as df df = pd.read_csv('./data/cheap_train_sample.csv') #read in test_data.csv test = pd.read_csv('./data/test_data.csv') # Checking come basic info, determining data types df.info() # Looking at description of numerical data df.describe() #Changes representation of Unknown values. df.replace(to_replace= ' ?', value='other', inplace=True) plt.figure(figsize = (10,8)) plt.hist(df['age']); #sns.catplot(x ='age', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Age',fontsize = 16,fontweight='bold') plt.xticks(rotation = 0, fontsize = 12) plt.yticks(rotation = 0, fontsize = 12); # Checking correlation heatmap for any standout correlations sns.heatmap(df.corr(), cmap='coolwarm') # clean object features to be numeric. # a lot of features for a limited amount of unique data points. df#%% plt.figure(figsize = (10,8)) #plt.hist(df['workclass']); sns.catplot(x ='workclass', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Workclass',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 12) plt.yticks(rotation = 0, fontsize = 12); plt.figure(figsize = (10,8)) plt.hist(df['fnlwgt']); #sns.catplot(x ='fnlwgt', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Final Wighting',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 12) plt.yticks(rotation = 0, fontsize = 12); df_ed = df.sort_values(by = 'education') plt.figure(figsize = (20,8)) #plt.hist(df['education']); sns.catplot(x ='education', hue ='sex', kind ='count', data = df_ed, height = 12) plt.title('The distribution of Education',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 12); plt.figure(figsize = (10,8)) #plt.hist(df['marital-status']) sns.catplot(x ='marital-status', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Marital Status',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 12); plt.figure(figsize = (10,8)) #plt.hist(df['occupation']) sns.catplot(x ='occupation', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Ocupation',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 10); plt.figure(figsize = (10,8)) #plt.hist(df['relationship']) sns.catplot(x ='relationship', hue ='sex', kind ='count', data = df, height = 8) plt.title('The distribution of Relationship',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 10); plt.figure(figsize = (10,8)) plt.hist(df['capital-gain'], bins = 20) #sns.catplot(x ='capital-gain', hue ='sex', kind ='count', data = df, height = 8, bins = 20) plt.title('The distribution of Capital - gain',fontsize = 
16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 10); plt.figure(figsize = (10,8)) plt.hist(df['capital-loss'], bins = 20) #sns.catplot(x ='capital-gain', hue ='sex', kind ='count', data = df, height = 8, bins = 20) plt.title('The distribution of Capital - loss',fontsize = 16,fontweight='bold') plt.xticks(rotation = 30, fontsize = 10) plt.yticks(rotation = 0, fontsize = 10); df = pd.get_dummies(df, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'], drop_first=True) test = pd.get_dummies(test, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'], drop_first=True) print(f'Number of Columns in Training data: ',len(df.columns)) print(f'NUmber of Columns in Testing data: ', len(test.columns)) #note the columns are the same length and df.wage.value_counts() #below or equal to 50k will be true. # Creating our y-column df['wage'] = [0 if wage == ' <=50K' else 1 for wage in df.wage] df.wage.value_counts() # Splitting the data into our train and test sets, we are dropping the 'education' column as # they also provied us with a numerical version of that column X = df.drop(columns=['wage', 'education']) features = X.columns y = df['wage'] X_train, X_val, y_train, y_val = train_test_split(X,y, random_state=420) #Baseline score guess = [1 if pred == 1 else 0 for pred in y] sum(guess)/len(guess) rfc = RandomForestClassifier() cross_val_score(rfc, X,y, cv=5).mean() svc = SVC() cross_val_score(svc,X,y,cv= 5).mean() boost = AdaBoostClassifier() cross_val_score(boost,X,y,cv=5).mean() logr= LogisticRegression() cross_val_score(logr,X,y,cv=5).mean() tree = DecisionTreeClassifier() cross_val_score(tree, X,y,cv=5).mean() x_trees = ExtraTreeClassifier() cross_val_score(x_trees, X,y,cv= 5).mean() knn = KNeighborsClassifier() cross_val_score(knn, X,y,cv= 5).mean() pipe = Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())]) pipe.fit(X_train, y_train) print(f'Training Score: ', pipe.score(X_train,y_train)) print(f'Validation Score: ', pipe.score(X_val, y_val)) predictions = pipe.predict(X_val) print(confusion_matrix(y_val, predictions)) print(classification_report(y_val, predictions)) pipe = Pipeline([('scaler', StandardScaler()),('boost', AdaBoostClassifier())]) pipe.fit(X_train, y_train) print(f'Training Score: ', pipe.score(X_train,y_train)) print(f'Validation Score: ', pipe.score(X_val, y_val)) predictions = pipe.predict(X_val) print(confusion_matrix(y_val, predictions)) print(classification_report(y_val, predictions)) boost = AdaBoostClassifier() boost.fit(X_train,y_train) values = boost.feature_importances_ feat = pd.DataFrame(features) feat['values'] = values df_feat = feat.sort_values(by= 'values', ascending=False).head(20) df_feat.columns = ['features','values'] df_feat plt.figure(figsize = (17,8)) plt.title('The distribution of feature importance',fontsize = 15) sns.barplot(x = 'features',y = 'values', data = df_feat) #df_feat.plot(kind='bar') plt.xticks(rotation = 30, fontsize = 9) plt.yticks(rotation = 0, fontsize = 10); #weight features and adjust hyperparameters. 
#what if for feature with an importance below .1 we multiply the estimators = np.random.randint(10,300,25) params = dict(n_estimators= estimators) grid = GridSearchCV(AdaBoostClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1) grid.fit(X_train,y_train) print(f'Training Score: ', grid.score(X_train,y_train)) print(f'Validation Score: ', grid.score(X_val,y_val)) print(grid.best_params_) print(grid.best_score_) l_rate = [0.1291549665014884] #np.logspace(-2,-0.8,100) estimators = [131] #np.random.randint(100,200,5) criterion = ['mse'] #('friedman_mse', 'mse', 'mae') max_features = [None] # [None, 'sqrt', 'log2'] params = dict(n_estimators= estimators, learning_rate= l_rate, criterion= criterion, max_features= max_features) grid = GridSearchCV(GradientBoostingClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1) grid.fit(X_train,y_train) print(f'Training Score: ', grid.score(X_train,y_train)) print(f'Validation Score: ', grid.score(X_val,y_val)) print(grid.best_params_) print(grid.best_score_) best_params = grid.best_params_ #need to convert strings to best_params = {key: [value] for key, value in best_params.items()} X_test = test.drop(columns=['education']) test_grid = GridSearchCV(GradientBoostingClassifier(), param_grid= best_params, n_jobs=-1, cv=5, verbose= 1) test_grid.fit(X,y) pred = test_grid.predict(X_test) submit = pd.DataFrame(pred, columns=['wage']) submit.to_csv('submission.csv', index=False)
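A minimal follow-up sketch (not part of the original notebook; the variable name `final_model` is mine): once `grid.best_params_` is known, the tuned `GradientBoostingClassifier` can also be fit directly, without wrapping the parameters back into a `GridSearchCV`, and checked against the validation split before predicting on the test data:
```
# Sketch only: reuse the tuned hyperparameters directly instead of re-running a grid search.
final_model = GradientBoostingClassifier(**grid.best_params_)
final_model.fit(X_train, y_train)
print('Training Score: ', final_model.score(X_train, y_train))
print('Validation Score: ', final_model.score(X_val, y_val))
print(classification_report(y_val, final_model.predict(X_val)))
```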
0.540924
0.956431
``` %matplotlib inline import quimb as qu import quimb.tensor as qtn ``` First we create the random MERA state (currently the number of sites ``n`` must be a power of 2): ``` n = 128 mera = qtn.MERA.rand_invar(n) ``` We can also set up some default graphing options, namely, to pin the physical (outer) indices in a circle: ``` from math import cos, sin, pi fix = { 'k{}'.format(i): (sin(2 * pi * i / n), cos(2 * pi * i / n)) for i in range(n) } # reduce the 'spring constant' k as well graph_opts = dict(fix=fix, k=0.01) ``` By default, the MERA constructor adds the ``'_ISO'`` and ``'_UNI'`` tensor tags to demark the isometries and unitaries respectively: ``` mera.graph(color=['_UNI', '_ISO'], **graph_opts) ``` It also tags each layer with ``'_LAYER2'`` etc.: ``` mera.graph(color=[f'_LAYER{i}' for i in range(7)], **graph_opts) ``` Finally, the site-tags of each initial tensor (``'I0'``, ``'I1'``, ``'I3'``, etc.) are propagated up through the isometries and unitaries, forming effective 'lightcones' for each site. Here, for example, we plot the lightcone of site 0, 40, and 80: ``` mera.graph(color=['I0', 'I40', 'I80'], **graph_opts) ``` Computing Local Quantities ---------------------------------------- In a MERA state, local quantities depend only on this lightcone. The way that ``quimb.tensor`` works supports this very naturally. Firstly, you can easily select all the tensors with site tag ``i``, i.e. the causal cone, with ``MERA.select(i)``: ``` # select all tensors relevant for site-0 mera.select(0).graph(color=[f'_LAYER{i}' for i in range(7)]) ``` Secondly, when combined with its conjugate network, all the dangling indices automatically match up. As an example, consider the state norm, but calculated for site 80 only: ``` nrm80 = mera.select(80).H & mera.select(80) nrm80.graph(color=[f'_LAYER{i}' for i in range(7)]) ``` We can contract this whole subnetwork efficiently to compute the actual value: ``` nrm80 ^ all ``` As expected. Or consider that we want to measure $\langle \psi | X_i Z_j | \psi \rangle$: ``` i, j = 50, 100 ij_tags = mera.site_tag(i), mera.site_tag(j) ij_tags ``` Now we can select the subnetwork of tensors with *either* the site 50 or site 100 lightcone (and also conjugate to form $\langle \psi |$): ``` mera_ij_H = mera.select(ij_tags, which='any').H ``` For $X_i Z_j | \psi \rangle$ we'll first apply the X and Z operators. By default the gate operation propagates the site tags to the applied operators as well, or we could use ``contract=True`` to actively contract them into the MERA: ``` X = qu.pauli('X') Z = qu.pauli('Z') XY_mera_ij = ( mera .gate(X, i) .gate(Z, j) .select(ij_tags, which='any') ) ``` Now we can lazily form the tensor network of this expectation value: ``` exp_XZ_ij = (mera_ij_H & XY_mera_ij) exp_XZ_ij.graph(color=[f'_LAYER{i}' for i in range(7)]) ``` Which we can efficiently contract: ``` exp_XZ_ij ^ all %%timeit exp_XZ_ij ^ all ``` ``` # generate the 'bra' state mera_H = mera.H.reindex_sites('b{}', range(20)) ``` We again only need the tensors in the causal cones of these 20 sites: ``` # NB we have to slice *before* combining the subnetworks here. 
# this is because paired indices are mangled when joining # two networks -> only dangling indices are guaranteed to # retain their value rho = ( mera_H.select(slice(20), which='any') & mera.select(slice(20), which='any') ) ``` We can see what this density operator looks like as a tensor network: ``` rho.graph(color=[f'_LAYER{i}' for i in range(7)]) ``` Or we can plot the sites (note that each initial unitary is two sites, and later color tags take precedence, thus the 'every-other' coloring effect): ``` rho.graph(color=[f'I{i}' for i in range(20)]) ``` This density matrix is too big to explicitly form (it would need $2^{40}$, about a trillion, elements). On the other hand we can treat it as a linear operator, in which case we only need to compute its action on a vector of size $2^{20}$. This allows the computation of 'spectral' quantities of the form $\text{Tr}(f(\rho))$. One such quantity is the entropy $-\text{Tr} \left( \rho \log_2 \rho \right)$: ``` # mark the indices as belonging to either the 'left' or # 'right' hand side of the effective operator left_ix = [f'k{i}' for i in range(20)] rght_ix = [f'b{i}' for i in range(20)] # form the linear operator rho_ab = rho.aslinearoperator(left_ix, rght_ix) rho_ab ``` ``` f = qu.xlogx S = - qu.approx_spectral_function(rho_ab, f, tol=0.02) print("rho_entropy ~", S) ``` To compute a genuine entanglement measure we need a further small trick. Specifically, if we are computing the negativity between subsystem A and subsystem B, we can perform the [partial transpose](https://en.wikipedia.org/wiki/Peres%E2%80%93Horodecki_criterion) simply by swapping subsystem B's 'left' indices for right indices. This creates a linear operator of $\rho_{AB}^{T_B}$, which we can compute the logarithmic negativity for, $\mathcal{E} = \log_2 \text{Tr} |\rho_{AB}^{T_B}|$: ``` # partition 20 spins in two sysa = range(0, 10) sysb = range(10, 20) # k0, k1, k2, ... b10, b11, b12, ... left_ix = [f'k{i}' for i in sysa] + [f'b{i}' for i in sysb] # b0, b1, b2, ... k10, k11, k12, ... rght_ix = [f'b{i}' for i in sysa] + [f'k{i}' for i in sysb] rho_ab_pt = rho.aslinearoperator(left_ix, rght_ix) ``` Now we just need to take ``abs`` as the function $f$ and scale the result with $\log_2$: ``` f = abs neg = qu.approx_spectral_function(rho_ab_pt, f, tol=0.02) print("rho_ab logarithmic negativity ~", qu.log2(neg)) ```
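One practical point worth illustrating (a sketch only; the tolerance values here are illustrative and not from the original notebook): `tol` controls the accuracy/time trade-off of the stochastic estimate, so it can be useful to check how stable the result is across a few tolerances before trusting a single run:
```
# Illustrative sketch: looser tol is faster but noisier; tighten it until the
# entropy estimate stops moving appreciably.
for tol in (0.1, 0.05, 0.02):
    S_est = - qu.approx_spectral_function(rho_ab, qu.xlogx, tol=tol)
    print(f"tol={tol}: rho_entropy ~ {S_est}")
```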
github_jupyter
%matplotlib inline import quimb as qu import quimb.tensor as qtn n = 128 mera = qtn.MERA.rand_invar(n) from math import cos, sin, pi fix = { 'k{}'.format(i): (sin(2 * pi * i / n), cos(2 * pi * i / n)) for i in range(n) } # reduce the 'spring constant' k as well graph_opts = dict(fix=fix, k=0.01) mera.graph(color=['_UNI', '_ISO'], **graph_opts) mera.graph(color=[f'_LAYER{i}' for i in range(7)], **graph_opts) mera.graph(color=['I0', 'I40', 'I80'], **graph_opts) # select all tensors relevant for site-0 mera.select(0).graph(color=[f'_LAYER{i}' for i in range(7)]) nrm80 = mera.select(80).H & mera.select(80) nrm80.graph(color=[f'_LAYER{i}' for i in range(7)]) nrm80 ^ all i, j = 50, 100 ij_tags = mera.site_tag(i), mera.site_tag(j) ij_tags mera_ij_H = mera.select(ij_tags, which='any').H X = qu.pauli('X') Z = qu.pauli('X') XY_mera_ij = ( mera .gate(X, i) .gate(Z, j) .select(ij_tags, which='any') ) exp_XZ_ij = (mera_ij_H & XY_mera_ij) exp_XZ_ij.graph(color=[f'_LAYER{i}' for i in range(7)]) exp_XZ_ij ^ all %%timeit exp_XZ_ij ^ all # generate the 'bra' state mera_H = mera.H.reindex_sites('b{}', range(20)) # NB we have to slice *before* combining the subnetworks here. # this is because paired indices are mangled when joining # two networks -> only dangling indices are guranteed to # retain their value rho = ( mera_H.select(slice(20), which='any') & mera.select(slice(20), which='any') ) rho.graph(color=[f'_LAYER{i}' for i in range(7)]) rho.graph(color=[f'I{i}' for i in range(20)]) # mark the indices as belonging to either the 'left' or # 'right' hand side of the effective operator left_ix = [f'k{i}' for i in range(20)] rght_ix = [f'b{i}' for i in range(20)] # form the linear operator rho_ab = rho.aslinearoperator(left_ix, rght_ix) rho_ab f = qu.xlogx S = - qu.approx_spectral_function(rho_ab, f, tol=0.02) print("rho_entropy ~", S) # partition 20 spins in two sysa = range(0, 10) sysb = range(10, 20) # k0, k1, k2, ... b10, b11, b12, ... left_ix = [f'k{i}' for i in sysa] + [f'b{i}' for i in sysb] # b0, b1, b2, ... k10, k11, k12, ... rght_ix = [f'b{i}' for i in sysa] + [f'k{i}' for i in sysb] rho_ab_pt = rho.aslinearoperator(left_ix, rght_ix) f = abs neg = qu.approx_spectral_function(rho_ab_pt, f, tol=0.02) print("rho_ab logarithmic negativity ~", qu.log2(neg))
0.418935
0.930648
# Python Reference - DateTime **Author:** Robert Bantele #### Definition A date in Python is not a data type of its own, but we can import a module named datetime to work with dates as date objects ##### Links https://docs.python.org/3/library/datetime.html https://www.w3schools.com/python/python_datetime.asp ### import ``` from datetime import datetime ``` ### ctor datetime can be constructed by specifying the numbers for year, month & days (mandatory) ``` datetime(2015, 6, 2) ``` hours, minutes, seconds and microseconds are optional parameters that default to 0 ``` datetime(2015, 6, 2, 13) datetime(2015, 6, 2, 13, 57) datetime(2015, 6, 2, 13, 57, 59) datetime(2015, 6, 2, 13, 57, 59, 500) ``` ### date, time and datetime ``` from datetime import date, time my_date = date(2020, 3, 22) my_time = time(20, 30, 0) my_datetime = datetime.combine(my_date, my_time) print(type(my_date)) print(my_date) print(type(my_time)) print(my_time) print(type(my_datetime)) print(my_datetime) print(type(my_datetime.date())) print(my_datetime.date()) print(type(my_datetime.time())) print(my_datetime.time()) ``` ### now ``` datetime.now() ``` ### timedelta = TimeSpan subtracting two datetimes returns a **timedelta** object which is similar to C#'s "TimeSpan" ``` delta = datetime.now() - datetime(1900,12,31) print(delta) print(type(delta)) ``` ### parse from string **datetime.strptime(datetimestring: str, pattern: str)** creates a datetime object from a datetime string and a pattern string ``` datetime.strptime("2017:12:31:20:59", "%Y:%m:%d:%H:%M") ``` ### format to string **datetime.strftime(pattern: str)** creates a formatted string from a datetime object ``` print(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) ``` #### playground use this cell to play around with formatting datetime strings ``` pattern: str = "%Y.%m.%d %H:%M:%S" print(datetime.now().strftime(pattern)) ``` #### patterns ##### Note: Examples are based on datetime.datetime(2013, 9, 30, 7, 6, 5) |Code | Meaning | Example | |:-----:|:--------|:--------| |**%a** | Weekday as locale’s abbreviated name. | **Mon** | |**%A** | Weekday as locale’s full name. | **Monday** | |**%w** | Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. | **1** | |**%d** | Day of the month as a zero-padded decimal number. | **30** | |**%-d** | Day of the month as a decimal number. (Platform specific) | **30** | |**%b** | Month as locale’s abbreviated name. | **Sep** | |**%B** | Month as locale’s full name. | **September** | |**%m** | Month as a zero-padded decimal number. | **09** | |**%-m** | Month as a decimal number. (Platform specific) | **9** | |**%y** | Year without century as a zero-padded decimal number. | **13** | |**%Y** | Year with century as a decimal number. | **2013** | |**%H** | Hour (24-hour clock) as a zero-padded decimal number. | **07** | |**%-H** | Hour (24-hour clock) as a decimal number. (Platform specific) | **7** | |**%I** | Hour (12-hour clock) as a zero-padded decimal number. | **07** | |**%-I** | Hour (12-hour clock) as a decimal number. (Platform specific) | **7** | |**%p** | Locale’s equivalent of either AM or PM. | **AM** | |**%M** | Minute as a zero-padded decimal number. | **06** | |**%-M** | Minute as a decimal number. (Platform specific) | **6** | |**%S** | Second as a zero-padded decimal number. | **05** | |**%-S** | Second as a decimal number. (Platform specific) | **5** | |**%f** | Microsecond as a decimal number, zero-padded on the left. 
| **000000** | |**%z** | UTC offset in the form +HHMM or -HHMM (empty string if the object is naive). | x | |**%Z** | Time zone name (empty string if the object is naive). | x | |**%j** | Day of the year as a zero-padded decimal number. | **273** | |**%-j** | Day of the year as a decimal number. (Platform specific) | **273** | |**%U** | Week number of the year (Sunday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0. | **39** | |**%W** | Week number of the year (Monday as the first day of the week) as a decimal number. All days in a new year preceding the first Monday are considered to be in week 0. | **39** | |**%c** | Locale’s appropriate date and time representation. | **Mon Sep 30 07:06:05 2013** | |**%x** | Locale’s appropriate date representation. | **09/30/13** | |**%X** | Locale’s appropriate time representation. | **07:06:05** | |**%%** | A literal '%' character. | **%** |
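The example column can be reproduced directly with the reference timestamp the table is based on; a quick check of a few rows (standard library only):
```
ref = datetime(2013, 9, 30, 7, 6, 5)
print(ref.strftime("%A"))  # Monday
print(ref.strftime("%j"))  # 273 (day of the year)
print(ref.strftime("%U"))  # 39 (Sunday-based week number)
print(ref.strftime("%c"))  # Mon Sep 30 07:06:05 2013 (locale dependent)
```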
github_jupyter
from datetime import datetime datetime(2015, 6, 2) datetime(2015, 6, 2, 13) datetime(2015, 6, 2, 13, 57) datetime(2015, 6, 2, 13, 57, 59) datetime(2015, 6, 2, 13, 57, 59, 500) from datetime import date, time my_date = date(2020, 3, 22) my_time = time(20, 30, 0) my_datetime = datetime.combine(my_date, my_time) print(type(my_date)) print(my_date) print(type(my_time)) print(my_time) print(type(my_datetime)) print(my_datetime) print(type(my_datetime.date())) print(my_datetime.date()) print(type(my_datetime.time())) print(my_datetime.time()) datetime.now() delta = datetime.now() - datetime(1900,12,31) print(delta) print(type(delta)) datetime.strptime("2017:12:31:20:59", "%Y:%m:%d:%H:%M") print(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")) pattern: str = "%Y.%m.%d %H:%M:%S" print(datetime.now().strftime(pattern))
0.274935
0.952353
# Hyperparameter optimization with Dask Every machine learning model has some values that are specified before training begins. These values help adapt the model to the data but must be given before any training data is seen. For example, this might be `penalty` or `C` in Scikit-learn's [LogisticRegression]. These values come before any training data and are called "hyperparameters". Typical usage looks something like: ``` python from sklearn.linear_model import LogisticRegression from sklearn.datasets import make_classification X, y = make_classification() est = LogisticRegression(C=10, penalty="l2") est.fit(X, y) ``` These hyperparameters influence the quality of the prediction. For example, if `C` is too small in the example above, the output of the estimator will not fit the data well. Determining the values of these hyperparameters is difficult. In fact, Scikit-learn has an entire documentation page on finding the best values: https://scikit-learn.org/stable/modules/grid_search.html [LogisticRegression]:https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html Dask enables some new techniques and opportunities for hyperparameter optimization. One of these opportunities involves stopping training early to limit computation. Naturally, this requires some way to stop and restart training (`partial_fit` or `warm_start` in Scikit-learn parlance). This is especially useful when the search is complex and has many search parameters. Good examples are most deep learning models, which have specialized algorithms for handling large amounts of data but have difficulty providing basic hyperparameters (e.g., "learning rate", "momentum" or "weight decay"). **This notebook will walk through** * setting up a realistic example * how to use `HyperbandSearchCV`, including * understanding the input parameters to `HyperbandSearchCV` * running the hyperparameter optimization * how to access information from `HyperbandSearchCV` This notebook will specifically *not* show a performance comparison motivating `HyperbandSearchCV` use. `HyperbandSearchCV` finds high scores with minimal training; however, this is a tutorial on how to *use* it. All performance comparisons are relegated to section [*Learn more*](#Learn-more). 
``` %matplotlib inline ``` ## Setup Dask ``` from distributed import Client client = Client(processes=False, threads_per_worker=4, n_workers=1, memory_limit='2GB') client ``` ## Create Data ``` from sklearn.datasets import make_circles import numpy as np import pandas as pd X, y = make_circles(n_samples=30_000, random_state=0, noise=0.09) pd.DataFrame({0: X[:, 0], 1: X[:, 1], "class": y}).sample(4_000).plot.scatter( x=0, y=1, alpha=0.2, c="class", cmap="bwr" ); ``` ### Add random dimensions ``` from sklearn.utils import check_random_state rng = check_random_state(42) random_feats = rng.uniform(-1, 1, size=(X.shape[0], 4)) X = np.hstack((X, random_feats)) X.shape ``` ### Split and scale data ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=5_000, random_state=42) from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split scaler = StandardScaler().fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) from dask.utils import format_bytes for name, X in [("train", X_train), ("test", X_test)]: print("dataset =", name) print("shape =", X.shape) print("bytes =", format_bytes(X.nbytes)) print("-" * 20) ``` Now we have our train and test sets. ## Create model and search space Let's use Scikit-learn's MLPClassifier as our model (for convenience). Let's use this model with 24 neurons and tune some of the other basic hyperparameters. ``` import numpy as np from sklearn.neural_network import MLPClassifier model = MLPClassifier() ``` Deep learning libraries can be used as well. In particular, [PyTorch]'s Scikit-Learn wrapper [Skorch] works well with `HyperbandSearchCV`. [PyTorch]:https://pytorch.org/ [Skorch]:https://skorch.readthedocs.io/en/stable/ ``` params = { "hidden_layer_sizes": [ (24, ), (12, 12), (6, 6, 6, 6), (4, 4, 4, 4, 4, 4), (12, 6, 3, 3), ], "activation": ["relu", "logistic", "tanh"], "alpha": np.logspace(-6, -3, num=1000), # cnts "batch_size": [16, 32, 64, 128, 256, 512], } ``` ## Hyperparameter optimization `HyperbandSearchCV` is Dask-ML's meta-estimator to find the best hyperparameters. It can be used as an alternative to `RandomizedSearchCV` to find similar hyper-parameters in less time by not wasting time on hyper-parameters that are not promising. Specifically, it is almost guaranteed that it will find high performing models with minimal training. This section will focus on 1. Understanding the input parameters to `HyperbandSearchCV` 2. Using `HyperbandSearchCV` to find the best hyperparameters 3. Seeing other use cases of `HyperbandSearchCV` ``` from dask_ml.model_selection import HyperbandSearchCV ``` ## Determining input parameters A rule-of-thumb to determine `HyperbandSearchCV`'s input parameters requires knowing: 1. the number of examples the longest trained model will see 2. the number of hyperparameters to evaluate Let's write down what these should be for this example: ``` # For quick response n_examples = 4 * len(X_train) n_params = 8 # In practice, HyperbandSearchCV is most useful for longer searches # n_examples = 15 * len(X_train) # n_params = 15 ``` In this, models that are trained the longest will see `n_examples` examples. This is how much data is required, normally set be the problem difficulty. Simple problems may only need 10 passes through the dataset; more complex problems may need 100 passes through the dataset. There will be `n_params` parameters sampled so `n_params` models will be evaluated. 
Models with low scores will be terminated before they see `n_examples` examples. This helps perserve computation. How can we use these values to determine the inputs for `HyperbandSearchCV`? ``` max_iter = n_params # number of times partial_fit will be called chunks = n_examples // n_params # number of examples each call sees max_iter, chunks ``` This means that the longest trained estimator will see about `n_examples` examples (specifically `n_params * (n_examples // n_params`). ## Applying input parameters Let's create a Dask array with this chunk size: ``` import dask.array as da X_train2 = da.from_array(X_train, chunks=chunks) y_train2 = da.from_array(y_train, chunks=chunks) X_train2 ``` Each `partial_fit` call will receive one chunk. That means the number of exmaples in each chunk should be (about) the same, and `n_examples` and `n_params` should be chosen to make that happen. (e.g., with 100 examples, shoot for chunks with `(33, 33, 34)` examples not `(48, 48, 4)` examples). Now let's use `max_iter` to create our `HyperbandSearchCV` object: ``` search = HyperbandSearchCV( model, params, max_iter=max_iter, patience=True, ) ``` ## How much computation will be performed? It isn't clear how to determine how much computation is done from `max_iter` and `chunks`. Luckily, `HyperbandSearchCV` has a `metadata` attribute to determine this beforehand: ``` search.metadata["partial_fit_calls"] ``` This shows how many `partial_fit` calls will be performed in the computation. `metadata` also includes information on the number of models created. So far, all that's been done is getting the search ready for computation (and seeing how much computation will be performed). So far, all the computation has been quick and easy. ## Performing the computation Now, let's do the model selection search and find the best hyperparameters. This is the real core of this notebook. This computation will be take place on all the hardware Dask has available. ``` %%time search.fit(X_train2, y_train2, classes=[0, 1, 2, 3]) ``` The dashboard will be active while this is running. It will show which workers are running `partial_fit` and `score` calls. This takes about 10 seconds. ## Integration `HyperbandSearchCV` follows the Scikit-learn API and mirrors Scikit-learn's `RandomizedSearchCV`. This means that it "just works". All the Scikit-learn attributes and methods are available: ``` search.best_score_ search.best_estimator_ cv_results = pd.DataFrame(search.cv_results_) cv_results.head() search.score(X_test, y_test) search.predict(X_test) search.predict(X_test).compute() ``` It also has some other attributes. ``` hist = pd.DataFrame(search.history_) hist.head() ``` This illustrates the history after every `partial_fit` call. There's also an attributed `model_history_` that records the history for each model (it's a reorganization of `history_`). ## Learn more This notebook covered basic usage `HyperbandSearchCV`. The following documentation and resources might be useful to learn more about `HyperbandSearchCV`, including some of the finer use cases: * [A talk](https://www.youtube.com/watch?v=x67K9FiPFBQ) introducing `HyperbandSearchCV` to the SciPy 2019 audience and the [corresponding paper](https://conference.scipy.org/proceedings/scipy2019/pdfs/scott_sievert.pdf) * [HyperbandSearchCV's documentation](https://ml.dask.org/modules/generated/dask_ml.model_selection.HyperbandSearchCV.html) Performance comparisons can be found in the SciPy 2019 talk/paper.
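To gather the rule-of-thumb in one place (a sketch that simply restates the steps above, using the same illustrative numbers as the notebook), the derived inputs and the resulting chunking can be checked before building the search:
```
# Sketch: derive HyperbandSearchCV inputs from the rule of thumb and confirm
# the chunks are roughly equal in size.
n_examples = 4 * len(X_train)        # examples seen by the longest-trained model
n_params = 8                         # hyperparameter samples to evaluate

max_iter = n_params                  # partial_fit calls for the longest-trained model
chunk_size = n_examples // n_params  # examples per partial_fit call

X_check = da.from_array(X_train, chunks=chunk_size)
print(max_iter, chunk_size, X_check.chunks)  # chunk sizes should be (about) equal
```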
github_jupyter
These hyperparameters influence the quality of the prediction. For example, if `C` is too small in the example above, the output of the estimator will not fit the data well. Determining the values of these hyperparameters is difficult. In fact, Scikit-learn has an entire documentation page on finding the best values: https://scikit-learn.org/stable/modules/grid_search.html [LogisiticRegression]:https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html Dask enables some new techniques and opportunities for hyperparameter optimization. One of these opportunities involves stopping training early to limit computation. Naturally, this requires some way to stop and restart training (`partial_fit` or `warm_start` in Scikit-learn parlance). This is especially useful when the search is complex and has many search parameters. Good examples are most deep learning models, which has specialized algorithms for handling many data but have difficulty providing basic hyperparameters (e.g., "learning rate", "momentum" or "weight decay"). **This notebook will walk through** * setting up a realistic example * how to use `HyperbandSearchCV`, including * understanding the input parameters to `HyperbandSearchCV` * running the hyperparameter optimization * how to access informantion from `HyperbandSearchCV` This notebook will specifically *not* show a performance comparison motivating `HyperbandSearchCV` use. `HyperbandSearchCV` finds high scores with minimal training; however, this is a tutorial on how to *use* it. All performance comparisons are relegated to section [*Learn more*](#Learn-more). ## Setup Dask ## Create Data ### Add random dimensions ### Split and scale data Now we have our train and test sets. ## Create model and search space Let's use Scikit-learn's MLPClassifier as our model (for convenience). Let's use this model with 24 neurons and tune some of the other basic hyperparameters. Deep learning libraries can be used as well. In particular, [PyTorch]'s Scikit-Learn wrapper [Skorch] works well with `HyperbandSearchCV`. [PyTorch]:https://pytorch.org/ [Skorch]:https://skorch.readthedocs.io/en/stable/ ## Hyperparameter optimization `HyperbandSearchCV` is Dask-ML's meta-estimator to find the best hyperparameters. It can be used as an alternative to `RandomizedSearchCV` to find similar hyper-parameters in less time by not wasting time on hyper-parameters that are not promising. Specifically, it is almost guaranteed that it will find high performing models with minimal training. This section will focus on 1. Understanding the input parameters to `HyperbandSearchCV` 2. Using `HyperbandSearchCV` to find the best hyperparameters 3. Seeing other use cases of `HyperbandSearchCV` ## Determining input parameters A rule-of-thumb to determine `HyperbandSearchCV`'s input parameters requires knowing: 1. the number of examples the longest trained model will see 2. the number of hyperparameters to evaluate Let's write down what these should be for this example: In this, models that are trained the longest will see `n_examples` examples. This is how much data is required, normally set be the problem difficulty. Simple problems may only need 10 passes through the dataset; more complex problems may need 100 passes through the dataset. There will be `n_params` parameters sampled so `n_params` models will be evaluated. Models with low scores will be terminated before they see `n_examples` examples. This helps perserve computation. 
How can we use these values to determine the inputs for `HyperbandSearchCV`? This means that the longest trained estimator will see about `n_examples` examples (specifically `n_params * (n_examples // n_params`). ## Applying input parameters Let's create a Dask array with this chunk size: Each `partial_fit` call will receive one chunk. That means the number of exmaples in each chunk should be (about) the same, and `n_examples` and `n_params` should be chosen to make that happen. (e.g., with 100 examples, shoot for chunks with `(33, 33, 34)` examples not `(48, 48, 4)` examples). Now let's use `max_iter` to create our `HyperbandSearchCV` object: ## How much computation will be performed? It isn't clear how to determine how much computation is done from `max_iter` and `chunks`. Luckily, `HyperbandSearchCV` has a `metadata` attribute to determine this beforehand: This shows how many `partial_fit` calls will be performed in the computation. `metadata` also includes information on the number of models created. So far, all that's been done is getting the search ready for computation (and seeing how much computation will be performed). So far, all the computation has been quick and easy. ## Performing the computation Now, let's do the model selection search and find the best hyperparameters. This is the real core of this notebook. This computation will be take place on all the hardware Dask has available. The dashboard will be active while this is running. It will show which workers are running `partial_fit` and `score` calls. This takes about 10 seconds. ## Integration `HyperbandSearchCV` follows the Scikit-learn API and mirrors Scikit-learn's `RandomizedSearchCV`. This means that it "just works". All the Scikit-learn attributes and methods are available: It also has some other attributes.
0.954255
0.98944
``` import json import numpy as np import tensorflow as tf import collections from sklearn.cross_validation import train_test_split with open('ctexts.json','r') as fopen: ctexts = json.load(fopen) with open('headlines.json','r') as fopen: headlines = json.load(fopen) from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import TruncatedSVD def topic_modelling(string, n = 500): vectorizer = TfidfVectorizer() tf = vectorizer.fit_transform([string]) tf_features = vectorizer.get_feature_names() compose = TruncatedSVD(1).fit(tf) return ' '.join([tf_features[i] for i in compose.components_[0].argsort()[: -n - 1 : -1]]) %%time h, c = [], [] for i in range(len(ctexts)): try: c.append(topic_modelling(ctexts[i])) h.append(headlines[i]) except: pass def build_dataset(words, n_words): count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]] count.extend(collections.Counter(words).most_common(n_words)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: index = dictionary.get(word, 0) if index == 0: unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary concat_from = ' '.join(c).split() vocabulary_size_from = len(list(set(concat_from))) data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from) print('vocab from size: %d'%(vocabulary_size_from)) print('Most common words', count_from[4:10]) print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]]) concat_to = ' '.join(h).split() vocabulary_size_to = len(list(set(concat_to))) data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to) print('vocab to size: %d'%(vocabulary_size_to)) print('Most common words', count_to[4:10]) print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]]) for i in range(len(h)): h[i] = h[i] + ' EOS' h[0] GO = dictionary_from['GO'] PAD = dictionary_from['PAD'] EOS = dictionary_from['EOS'] UNK = dictionary_from['UNK'] def str_idx(corpus, dic, UNK=3): X = [] for i in corpus: ints = [] for k in i.split(): ints.append(dic.get(k, UNK)) X.append(ints) return X X = str_idx(c, dictionary_from) Y = str_idx(h, dictionary_to) train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size = 0.2) class Summarization: def __init__(self, size_layer, num_layers, embedded_size, from_dict_size, to_dict_size, batch_size): def lstm_cell(reuse=False): return tf.nn.rnn_cell.LSTMCell(size_layer, initializer=tf.orthogonal_initializer(), reuse=reuse) def attention(encoder_out, seq_len, reuse=False): attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size_layer, memory = encoder_out, memory_sequence_length = seq_len) return tf.contrib.seq2seq.AttentionWrapper( cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(reuse) for _ in range(num_layers)]), attention_mechanism = attention_mechanism, attention_layer_size = size_layer) self.X = tf.placeholder(tf.int32, [None, None]) self.Y = tf.placeholder(tf.int32, [None, None]) self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32) self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32) batch_size = tf.shape(self.X)[0] # encoder encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1)) encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X) encoder_cells = 
tf.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(num_layers)]) self.encoder_out, self.encoder_state = tf.nn.dynamic_rnn(cell = encoder_cells, inputs = encoder_embedded, sequence_length = self.X_seq_len, dtype = tf.float32) self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers)) main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1]) decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1) # decoder decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1)) decoder_cell = attention(self.encoder_out, self.X_seq_len) dense_layer = tf.layers.Dense(to_dict_size) training_helper = tf.contrib.seq2seq.TrainingHelper( inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input), sequence_length = self.Y_seq_len, time_major = False) training_decoder = tf.contrib.seq2seq.BasicDecoder( cell = decoder_cell, helper = training_helper, initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state), output_layer = dense_layer) training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder = training_decoder, impute_finished = True, maximum_iterations = tf.reduce_max(self.Y_seq_len)) predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( embedding = encoder_embeddings, start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]), end_token = EOS) predicting_decoder = tf.contrib.seq2seq.BasicDecoder( cell = decoder_cell, helper = predicting_helper, initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state), output_layer = dense_layer) predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder = predicting_decoder, impute_finished = True, maximum_iterations = tf.reduce_max(self.X_seq_len)) self.training_logits = training_decoder_output.rnn_output self.predicting_ids = predicting_decoder_output.sample_id masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32) self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits, targets = self.Y, weights = masks) self.optimizer = tf.train.AdamOptimizer().minimize(self.cost) y_t = tf.argmax(self.training_logits,axis=2) y_t = tf.cast(y_t, tf.int32) self.prediction = tf.boolean_mask(y_t, masks) mask_label = tf.boolean_mask(self.Y, masks) correct_pred = tf.equal(self.prediction, mask_label) correct_index = tf.cast(correct_pred, tf.float32) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) size_layer = 128 num_layers = 2 embedded_size = 128 batch_size = 8 epoch = 5 tf.reset_default_graph() sess = tf.InteractiveSession() model = Summarization(size_layer, num_layers, embedded_size, len(dictionary_from), len(dictionary_to), batch_size) sess.run(tf.global_variables_initializer()) def pad_sentence_batch(sentence_batch, pad_int): padded_seqs = [] seq_lens = [] max_sentence_len = max([len(sentence) for sentence in sentence_batch]) for sentence in sentence_batch: padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence))) seq_lens.append(len(sentence)) return padded_seqs, seq_lens from tqdm import tqdm from sklearn.utils import shuffle import time for EPOCH in range(10): lasttime = time.time() total_loss, total_accuracy, total_loss_test, total_accuracy_test = 0, 0, 0, 0 train_X, train_Y = shuffle(train_X, train_Y) test_X, test_Y = shuffle(test_X, test_Y) pbar = tqdm(range(0, len(train_X), batch_size), desc='train minibatch loop') for k in pbar: batch_x, _ = pad_sentence_batch(train_X[k: 
min(k+batch_size,len(train_X))], PAD) batch_y, _ = pad_sentence_batch(train_Y[k: min(k+batch_size,len(train_X))], PAD) acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer], feed_dict={model.X:batch_x, model.Y:batch_y}) total_loss += loss total_accuracy += acc pbar.set_postfix(cost=loss, accuracy = acc) pbar = tqdm(range(0, len(test_X), batch_size), desc='test minibatch loop') for k in pbar: batch_x, _ = pad_sentence_batch(test_X[k: min(k+batch_size,len(test_X))], PAD) batch_y, _ = pad_sentence_batch(test_Y[k: min(k+batch_size,len(test_X))], PAD) acc, loss = sess.run([model.accuracy, model.cost], feed_dict={model.X:batch_x, model.Y:batch_y}) total_loss_test += loss total_accuracy_test += acc pbar.set_postfix(cost=loss, accuracy = acc) total_loss /= (len(train_X) / batch_size) total_accuracy /= (len(train_X) / batch_size) total_loss_test /= (len(test_X) / batch_size) total_accuracy_test /= (len(test_X) / batch_size) print('epoch: %d, avg loss: %f, avg accuracy: %f'%(EPOCH, total_loss, total_accuracy)) print('epoch: %d, avg loss test: %f, avg accuracy test: %f'%(EPOCH, total_loss_test, total_accuracy_test)) sess.run(model.predicting_ids, feed_dict = {model.X: batch_x}) batch_y ```
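The last cell only returns raw token ids; a hedged sketch (the helper name is mine, not from the notebook) of mapping a predicted batch back to headline text with `rev_dictionary_to`, truncating at the EOS token. This relies on both vocabularies reserving indices 0–3 for the PAD/GO/EOS/UNK tokens, as `build_dataset` does above:
```
# Sketch: convert predicted id sequences back into words, stopping at EOS and skipping PAD.
def ids_to_text(id_seqs, rev_dictionary, eos=EOS, pad=PAD):
    texts = []
    for seq in id_seqs:
        words = []
        for idx in seq:
            if idx == eos:
                break
            if idx != pad:
                words.append(rev_dictionary.get(idx, 'UNK'))
        texts.append(' '.join(words))
    return texts

predicted = sess.run(model.predicting_ids, feed_dict={model.X: batch_x})
print(ids_to_text(predicted, rev_dictionary_to)[:3])
```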
github_jupyter
import json import numpy as np import tensorflow as tf import collections from sklearn.cross_validation import train_test_split with open('ctexts.json','r') as fopen: ctexts = json.load(fopen) with open('headlines.json','r') as fopen: headlines = json.load(fopen) from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import TruncatedSVD def topic_modelling(string, n = 500): vectorizer = TfidfVectorizer() tf = vectorizer.fit_transform([string]) tf_features = vectorizer.get_feature_names() compose = TruncatedSVD(1).fit(tf) return ' '.join([tf_features[i] for i in compose.components_[0].argsort()[: -n - 1 : -1]]) %%time h, c = [], [] for i in range(len(ctexts)): try: c.append(topic_modelling(ctexts[i])) h.append(headlines[i]) except: pass def build_dataset(words, n_words): count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]] count.extend(collections.Counter(words).most_common(n_words)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: index = dictionary.get(word, 0) if index == 0: unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary concat_from = ' '.join(c).split() vocabulary_size_from = len(list(set(concat_from))) data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from) print('vocab from size: %d'%(vocabulary_size_from)) print('Most common words', count_from[4:10]) print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]]) concat_to = ' '.join(h).split() vocabulary_size_to = len(list(set(concat_to))) data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to) print('vocab to size: %d'%(vocabulary_size_to)) print('Most common words', count_to[4:10]) print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]]) for i in range(len(h)): h[i] = h[i] + ' EOS' h[0] GO = dictionary_from['GO'] PAD = dictionary_from['PAD'] EOS = dictionary_from['EOS'] UNK = dictionary_from['UNK'] def str_idx(corpus, dic, UNK=3): X = [] for i in corpus: ints = [] for k in i.split(): ints.append(dic.get(k, UNK)) X.append(ints) return X X = str_idx(c, dictionary_from) Y = str_idx(h, dictionary_to) train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size = 0.2) class Summarization: def __init__(self, size_layer, num_layers, embedded_size, from_dict_size, to_dict_size, batch_size): def lstm_cell(reuse=False): return tf.nn.rnn_cell.LSTMCell(size_layer, initializer=tf.orthogonal_initializer(), reuse=reuse) def attention(encoder_out, seq_len, reuse=False): attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size_layer, memory = encoder_out, memory_sequence_length = seq_len) return tf.contrib.seq2seq.AttentionWrapper( cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(reuse) for _ in range(num_layers)]), attention_mechanism = attention_mechanism, attention_layer_size = size_layer) self.X = tf.placeholder(tf.int32, [None, None]) self.Y = tf.placeholder(tf.int32, [None, None]) self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32) self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32) batch_size = tf.shape(self.X)[0] # encoder encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1)) encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X) encoder_cells = 
tf.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(num_layers)]) self.encoder_out, self.encoder_state = tf.nn.dynamic_rnn(cell = encoder_cells, inputs = encoder_embedded, sequence_length = self.X_seq_len, dtype = tf.float32) self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers)) main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1]) decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1) # decoder decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1)) decoder_cell = attention(self.encoder_out, self.X_seq_len) dense_layer = tf.layers.Dense(to_dict_size) training_helper = tf.contrib.seq2seq.TrainingHelper( inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input), sequence_length = self.Y_seq_len, time_major = False) training_decoder = tf.contrib.seq2seq.BasicDecoder( cell = decoder_cell, helper = training_helper, initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state), output_layer = dense_layer) training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder = training_decoder, impute_finished = True, maximum_iterations = tf.reduce_max(self.Y_seq_len)) predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( embedding = encoder_embeddings, start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]), end_token = EOS) predicting_decoder = tf.contrib.seq2seq.BasicDecoder( cell = decoder_cell, helper = predicting_helper, initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state), output_layer = dense_layer) predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder = predicting_decoder, impute_finished = True, maximum_iterations = tf.reduce_max(self.X_seq_len)) self.training_logits = training_decoder_output.rnn_output self.predicting_ids = predicting_decoder_output.sample_id masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32) self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits, targets = self.Y, weights = masks) self.optimizer = tf.train.AdamOptimizer().minimize(self.cost) y_t = tf.argmax(self.training_logits,axis=2) y_t = tf.cast(y_t, tf.int32) self.prediction = tf.boolean_mask(y_t, masks) mask_label = tf.boolean_mask(self.Y, masks) correct_pred = tf.equal(self.prediction, mask_label) correct_index = tf.cast(correct_pred, tf.float32) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) size_layer = 128 num_layers = 2 embedded_size = 128 batch_size = 8 epoch = 5 tf.reset_default_graph() sess = tf.InteractiveSession() model = Summarization(size_layer, num_layers, embedded_size, len(dictionary_from), len(dictionary_to), batch_size) sess.run(tf.global_variables_initializer()) def pad_sentence_batch(sentence_batch, pad_int): padded_seqs = [] seq_lens = [] max_sentence_len = max([len(sentence) for sentence in sentence_batch]) for sentence in sentence_batch: padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence))) seq_lens.append(len(sentence)) return padded_seqs, seq_lens from tqdm import tqdm from sklearn.utils import shuffle import time for EPOCH in range(10): lasttime = time.time() total_loss, total_accuracy, total_loss_test, total_accuracy_test = 0, 0, 0, 0 train_X, train_Y = shuffle(train_X, train_Y) test_X, test_Y = shuffle(test_X, test_Y) pbar = tqdm(range(0, len(train_X), batch_size), desc='train minibatch loop') for k in pbar: batch_x, _ = pad_sentence_batch(train_X[k: 
min(k+batch_size,len(train_X))], PAD) batch_y, _ = pad_sentence_batch(train_Y[k: min(k+batch_size,len(train_X))], PAD) acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer], feed_dict={model.X:batch_x, model.Y:batch_y}) total_loss += loss total_accuracy += acc pbar.set_postfix(cost=loss, accuracy = acc) pbar = tqdm(range(0, len(test_X), batch_size), desc='test minibatch loop') for k in pbar: batch_x, _ = pad_sentence_batch(test_X[k: min(k+batch_size,len(test_X))], PAD) batch_y, _ = pad_sentence_batch(test_Y[k: min(k+batch_size,len(test_X))], PAD) acc, loss = sess.run([model.accuracy, model.cost], feed_dict={model.X:batch_x, model.Y:batch_y}) total_loss_test += loss total_accuracy_test += acc pbar.set_postfix(cost=loss, accuracy = acc) total_loss /= (len(train_X) / batch_size) total_accuracy /= (len(train_X) / batch_size) total_loss_test /= (len(test_X) / batch_size) total_accuracy_test /= (len(test_X) / batch_size) print('epoch: %d, avg loss: %f, avg accuracy: %f'%(EPOCH, total_loss, total_accuracy)) print('epoch: %d, avg loss test: %f, avg accuracy test: %f'%(EPOCH, total_loss_test, total_accuracy_test)) sess.run(model.predicting_ids, feed_dict = {model.X: batch_x}) batch_y
0.517815
0.403802
<a href="https://colab.research.google.com/github/gaoxingliang/learnpython/blob/master/Q_Learning_with_FrozenLakev2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Q* Learning with FrozenLake 4x4 In this Notebook, we'll implement an agent <b>that plays FrozenLake.</b> ![alt text](http://simoninithomas.com/drlc/Qlearning/frozenlake4x4.png) The goal of this game is <b>to go from the starting state (S) to the goal state (G)</b> by walking only on frozen tiles (F) and avoid holes (H). However, the ice is slippery, **so you won't always move in the direction you intend (stochastic environment)** Thanks to [lukewys](https://github.com/lukewys) for his help ## Prerequisites 🏗️ Before diving on the notebook **you need to understand**: - The foundations of Reinforcement learning (MC, TD, Rewards hypothesis...) [Article](https://medium.freecodecamp.org/an-introduction-to-reinforcement-learning-4339519de419) - Q-learning [Article](https://medium.freecodecamp.org/diving-deeper-into-reinforcement-learning-with-q-learning-c18d0db58efe) - In the [video version](https://www.youtube.com/watch?v=q2ZOEFAaaI0) we implemented a Q-learning agent that learns to play OpenAI Taxi-v2 🚕 with Numpy. # This is a notebook from [Deep Reinforcement Learning Course, new version](https://simoninithomas.github.io/Deep_reinforcement_learning_Course/) <img src="https://raw.githubusercontent.com/simoninithomas/Deep_reinforcement_learning_Course/master/docs/assets/img/DRLC%20Environments.png" alt="Deep Reinforcement Course"/> <br> <p> Deep Reinforcement Learning Course is a free series of articles and videos tutorials 🆕 about Deep Reinforcement Learning, where **we'll learn the main algorithms (Q-learning, Deep Q Nets, Dueling Deep Q Nets, Policy Gradients, A2C, Proximal Policy Gradients, Prediction Based rewards agents…), and how to implement them with Tensorflow and PyTorch.** ![alt text](http://simoninithomas.com/drlc/libraries.png) <br><br> 📜The articles explain the architectures from the big picture to the mathematical details behind them. <br> 📹 The videos explain how to build the agents with Tensorflow </b></p> <br> This course will give you a **solid foundation for understanding and implementing the future state of the art algorithms**. And, you'll build a strong professional portfolio by creating **agents that learn to play awesome environments**: Doom© 👹, Space invaders 👾, Outrun, Sonic the Hedgehog©, Michael Jackson’s Moonwalker, agents that will be able to navigate in 3D environments with DeepMindLab (Quake) and able to walk with Mujoco. 
<br><br> </p> ## 📚 The complete [Syllabus HERE](https://simoninithomas.github.io/Deep_reinforcement_learning_Course/) ## Any questions 👨‍💻 <p> If you have any questions, feel free to ask me: </p> <p> 📧: <a href="mailto:[email protected]">[email protected]</a> </p> <p> Github: https://github.com/simoninithomas/Deep_reinforcement_learning_Course </p> <p> 🌐 : https://simoninithomas.github.io/Deep_reinforcement_learning_Course/ </p> <p> Twitter: <a href="https://twitter.com/ThomasSimonini">@ThomasSimonini</a> </p> <p> Don't forget to <b> follow me on <a href="https://twitter.com/ThomasSimonini">twitter</a>, <a href="https://github.com/simoninithomas/Deep_reinforcement_learning_Course">github</a> and <a href="https://medium.com/@thomassimonini">Medium</a> to be alerted of the new articles that I publish </b></p> ## How to help 🙌 3 ways: - **Clap our articles and like our videos a lot**: Clapping on Medium means that you really like our articles, and the more claps we have, the more our articles are shared. Liking our videos helps them become much more visible to the deep learning community. - **Share and speak about our articles and videos**: By sharing our articles and videos you help us to spread the word. - **Improve our notebooks**: if you find a bug or **a better implementation**, you can send a pull request. <br> ## Important note 🤔 <b> You can run it on your computer but it's better to run it on GPU-based services</b>, personally I use Microsoft Azure and their Deep Learning Virtual Machine (they offer 170$) https://azuremarketplace.microsoft.com/en-us/marketplace/apps/microsoft-ads.dsvm-deep-learning <br> ⚠️ I don't have any business relations with them. I just loved their excellent customer service. If you have trouble using Microsoft Azure, follow the explanations in this excellent article (skipping the last part on fast.ai): https://medium.com/@manikantayadunanda/setting-up-deeplearning-machine-and-fast-ai-on-azure-a22eb6bd6429 ## Step -1: Install the dependencies on Google Colab ``` !pip install numpy !pip install gym ``` ## Step 0: Import the dependencies 📚 We use 3 libraries: - `Numpy` for our Qtable - `OpenAI Gym` for our FrozenLake Environment - `Random` to generate random numbers ``` import numpy as np import gym import random ``` ## Step 1: Create the environment 🎮 - Here we'll create the FrozenLake 8x8 environment. - OpenAI Gym is a library <b> composed of many environments that we can use to train our agents.</b> - In our case we choose to use Frozen Lake. ``` from gym.envs.registration import register register( id='FrozenLakeNotSlippery-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name' : '8x8', 'is_slippery': False}, max_episode_steps=100, reward_threshold=0.8196, # optimum = .8196, changing this seems to have no influence ) env = gym.make("FrozenLakeNotSlippery-v0") ``` ## Step 2: Create the Q-table and initialize it 🗄️ - Now we'll create our Q-table; to know how many rows (states) and columns (actions) we need, we calculate the action_size and the state_size - OpenAI Gym provides a way to do that: `env.action_space.n` and `env.observation_space.n` ``` ```
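The final cell above is left empty; a minimal sketch of what the text describes, sizing and zero-initializing the Q-table from the environment:
```
# Sketch: one row per state, one column per action, all action-values start at zero.
action_size = env.action_space.n
state_size = env.observation_space.n

qtable = np.zeros((state_size, action_size))
print(qtable.shape)  # (64, 4) for the 8x8 map
```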
github_jupyter
!pip install numpy !pip install gym import numpy as np import gym import random from gym.envs.registration import register register( id='FrozenLakeNotSlippery-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name' : '8x8', 'is_slippery': False}, max_episode_steps=100, reward_threshold=0.8196, # optimum = .8196, changing this seems to have no influence ) env = gym.make("FrozenLakeNotSlippery-v0")
0.511473
0.984752
# Vertex Model Grid (DISV) plotting

FloPy supports vertex model grid plotting through the `PlotMapView` and `PlotCrossSection` classes. The method calls are almost identical to those for models that use a Structured Model Grid (DIS) to define the model discretization, and the same keyword arguments are supported. Let's run through an example using a vertex model grid.

First let's import flopy and get the model loaded!

```
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# run installed version of flopy or add local path
try:
    import flopy
    from flopy.plot import PlotMapView, PlotCrossSection
    from flopy.utils import HeadFile, CellBudgetFile, geometry
except:
    fpth = os.path.abspath(os.path.join('..', '..'))
    sys.path.append(fpth)
    import flopy
    from flopy.plot import PlotMapView, PlotCrossSection
    from flopy.utils import HeadFile, CellBudgetFile, geometry

print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))

# load up the example problem
sim_name = "mfsim.nam"
sim_path = "../data/mf6/test003_gwftri_disv"
sim = flopy.mf6.MFSimulation.load(sim_name=sim_name, version="mf6",
                                  exe_name="mf6", sim_ws=sim_path)
```

Now let's check the model name and then get an instance of our model.

```
sim.model_names
ml = sim.get_model('gwf_1')
ml
```

## Build a vertex model grid object

The `VertexModelGrid` is not yet integrated into FloPy, but will be shortly. Until then we can build a `VertexGrid` instance to use for plotting.

```
from flopy.discretization import VertexGrid

vmg = VertexGrid(ml.dis.vertices.array, ml.dis.cell2d.array,
                 top=ml.dis.top.array, botm=ml.dis.botm.array,
                 idomain=ml.dis.idomain.array, angrot=-25)
vmg
```

Now all the discretization information is available to be used to plot model data in a `PlotMapView` or `PlotCrossSection` object.

Let's begin with `PlotMapView` and plot the model grid.

```
vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_grid()
```

As we can see, the model grid plots as a series of grid lines. We can also plot inactive cells using `plot_inactive`.

```
vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_inactive()
vmap.plot_grid()
```

Model data can be plotted using the `plot_array` method as follows:

```
vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_array(a=ml.dis.botm.array)
plt.colorbar(ax)
```

This plot shows the bottom elevations of layer 1, which are a constant value of 1.5 in this example. Let's import some data from the model output to use with `plot_array()` to illustrate plotting better.

```
hds_file = os.path.join(sim_path, "tri_model.hds")
cbc_file = os.path.join(sim_path, "tri_model.cbc")

hds = HeadFile(hds_file)
hdata = hds.get_alldata()[0]
hdata.shape = (4, -1)

vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_inactive()
ax = vmap.plot_array(a=hdata.ravel(), masked_values=[1e30])
plt.colorbar(ax)
```

Data can also be contoured using the `contour_array` method.

```
vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_inactive(alpha=0.5)
ax = vmap.plot_array(a=hdata[0], masked_values=[1e30], alpha=0.5)
levels = np.arange(5, 10, 0.5)
vmap.contour_array(a=hdata[0], masked_values=[1e30], levels=levels)
plt.colorbar(ax)
```

Print the head at specific coordinates.

```
icell2d = vmg.intersect(8., -1.)
print('The head in layer 1 at x = 8, y = -1 is {}'.format(hdata[0, icell2d]))
```

### Plotting specific discharge

In MODFLOW 6, SAVE_SPECIFIC_DISCHARGE can be specified in the NPF package options block. Here we grab the specific discharge recarray and use it to plot discharge vectors with FloPy.

```
cbc = CellBudgetFile(cbc_file, precision='double')
spdis = cbc.get_data(text="SPDIS")
len(spdis[0])
```

Now we can pass the specific discharge recarray to `plot_specific_discharge`; FloPy will create a quiver plot of the discharge vectors.

```
vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_grid()
ax = vmap.plot_array(a=hdata, alpha=0.5, masked_values=[1e30])
plt.colorbar(ax)
ax = vmap.plot_specific_discharge(spdis[0])
plt.title("Specific Discharge");
```

# Working with model cross sections

FloPy supports cross sections for vertex model grids, just as for structured model grids, through the `PlotCrossSection` class. In fact, most of the functionality is identical to using `PlotCrossSection` with a structured model grid.

### Note: Cross sections must be defined by a line, since there is no row or column in a vertex model grid.

Let's start by creating our cross section line and showing it on a `PlotMapView` object.

```
# define a line through the model in model coordinates as a
# series of XY vertices where the cross section will be sliced!
line = np.array([(0, 3.5), (5, 3.5), (10, 2)])

# Transform the line into the spatial reference projection to match the projected coordinates
# Question? should we give the user a model grid based vertex option?
line = geometry.transform(line.T[0], line.T[1],
                          vmg.xoffset, vmg.yoffset,
                          vmg.angrot_radians)

vmap = PlotMapView(modelgrid=vmg, layer=0)
ax = vmap.plot_grid()
plt.plot(line[0], line[1], 'r--', linewidth=2)
```

Now that we see our cross section line traverses the simulation domain where we were expecting it, let's create a cross section using `PlotCrossSection` and plot the model grid using `plot_grid`. Our cross section line is passed in the `line` dictionary.

```
line = np.array(line).T

vcs = PlotCrossSection(modelgrid=vmg, line={"line": line})
vcs.plot_grid()
```

Array data can be plotted similarly to the `PlotMapView` example by calling the `plot_array` method.

```
vcs = PlotCrossSection(modelgrid=vmg, line={"line": line})
ax = vcs.plot_array(a=hdata, masked_values=[1e30])
plt.colorbar(ax)
vcs.plot_grid()
```

Contour plots can also be made using the `contour_array()` method.

```
vcs = PlotCrossSection(modelgrid=vmg, line={"line": line})
ax = vcs.plot_array(a=hdata, masked_values=[1e30], alpha=0.3)
plt.colorbar(ax)

# set our own contour levels using the matplotlib keyword argument levels
levels = np.arange(5, 10, 0.5)
vcs.contour_array(a=hdata, masked_values=[1e30], levels=levels)
vcs.plot_grid()
```

## Plotting discharge

Discharge is plotted similarly to the previous example: the SPDIS recarray is used with `plot_specific_discharge` to create and plot flow vectors.

### Note: Arbitrary cross section lines cannot be used with this method. A straight cross section must be supplied to calculate discharge vectors.

```
# Extend the line slightly past 0 and 10 (by 0.1) to account for precision issues with transforms
line = np.array([(-0.1, 3.5), (10.1, 3.5)])

# Transform the line into the spatial reference projection to match the projected coordinates
line = geometry.transform(line.T[0], line.T[1],
                          vmg.xoffset, vmg.yoffset,
                          vmg.angrot_radians)
line = np.array(line).T

vcs = PlotCrossSection(modelgrid=vmg, line={"line": line})
ax = vcs.plot_array(a=hdata, masked_values=[1e30])
plt.colorbar(ax)
vcs.plot_grid()

# use hstep to plot every second cell in the horizontal direction
ax = vcs.plot_specific_discharge(spdis[0], head=hdata, hstep=2)
plt.xlim([0, 10])
plt.title("Vertex grid specific discharge")
```

The discharge vectors look a little cramped due to the triangular nature of the grid.

### Let's load up a square grid example and plot discharge vectors.

```
sim_name = "mfsim.nam"
sim_path = "../data/mf6/test003_gwfs_disv"
sim = flopy.mf6.MFSimulation.load(sim_name=sim_name, version="mf6",
                                  exe_name="mf6", sim_ws=sim_path)
sim.run_simulation()
ml = sim.get_model('gwf_1')

vmg = VertexGrid(ml.dis.vertices.array, ml.dis.cell2d.array,
                 top=ml.dis.top.array, botm=ml.dis.botm.array,
                 idomain=ml.dis.idomain.array,
                 xoff=10, yoff=0, angrot=-25)

cbc_file = os.path.join(sim_path, "model.cbc")  # "expected_output/", "model_unch.cbc")
hds_file = os.path.join(sim_path, "model.hds")  # "expected_output/", "model_unch.hds")

cbc = CellBudgetFile(cbc_file, precision='double')
spdis = cbc.get_data(text="SPDIS")

hds = HeadFile(hds_file)
hdata = hds.get_alldata()[0]
hdata.shape = (4, -1)
```

Now let's plot the discharge vectors for a model with a grid of uniform elevations.

```
# Extend the line slightly past 0 and 10 (by 0.1) to account for precision issues with transforms
line = np.array([(-0.1, 2.5), (10.1, 2.5)])

# Transform the line into the spatial reference projection to match the projected coordinates
line = geometry.transform(line.T[0], line.T[1],
                          vmg.xoffset, vmg.yoffset,
                          vmg.angrot_radians)
line = np.array(line).T

vcs = PlotCrossSection(modelgrid=vmg, line={"line": line})
ax = vcs.plot_array(a=hdata, masked_values=[1e30])
plt.colorbar(ax)
vcs.plot_grid()
ax = vcs.plot_specific_discharge(spdis[0], head=hdata)
plt.xlim([0, 10])
plt.title("Vertex grid specific discharge");
```

These discharge vectors are much more uniform, as expected.

### For more information about the plotting functionality in FloPy, including optional keyword arguments, please see the IPython notebook: flopy3_PlotArrayExample.ipynb

### Happy plotting!
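As an aside on the SAVE_SPECIFIC_DISCHARGE option mentioned above: the example models here already had it enabled when they were built, which is why the cell budget files contain SPDIS records. If you are constructing your own MODFLOW 6 model with FloPy, a minimal sketch of turning the option on might look like the following; it assumes the `flopy.mf6.ModflowGwfnpf` constructor and an existing GWF model object named `gwf`, neither of which is part of this notebook, and the property values are illustrative only.

```
# Hypothetical sketch: enable SAVE_SPECIFIC_DISCHARGE on the NPF package so that
# SPDIS records are written to the cell budget file (budget output must also be
# requested in the OC package).
npf = flopy.mf6.ModflowGwfnpf(
    gwf,                           # parent GWF model object (assumed to exist)
    save_specific_discharge=True,  # adds SAVE_SPECIFIC_DISCHARGE to the NPF options block
    icelltype=0,                   # illustrative property values
    k=10.0,
)
```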
```
# Unzip the dataset
# !unzip -oq /home/aistudio/data/data100477/常规赛:PALM眼底彩照中黄斑中央凹定位.zip
# !rm -rf __MACOSX

import os                              # needed for os.listdir below
import blackhole.dataframe as pd       # data loading
import paddle.vision.transforms as T   # data augmentation
import matplotlib.pyplot as plt
import paddle
import cv2
import numpy as np

# My test: inspect the raw images
file_path='常规赛:PALM眼底彩照中黄斑中央凹定位/Train/fundus_image/'
# file_path='常规赛:PALM眼底彩照中黄斑中央凹定位/PALM-Testing400-Images/'
# file_name='H0001.jpg'
list_=os.listdir(file_path)
print(len(list_),list_[0])
shapes=[]
for f in list_:
    img=cv2.imread(file_path+f,-1)
    a=img.shape
    if(shapes.count(a)==0):
        shapes.append(a)
print(shapes)
# plt.imshow(img[:,:,[2,1,0]])

# Inspect the data labels
df=pd.read_excel('常规赛:PALM眼底彩照中黄斑中央凹定位/Train/Fovea_Location_train.xlsx')
df.head(1)
# print(type(df))

# Compute the mean and standard deviation of the labels, used for label normalization
key_pts_values = df.values[:,1:]   # extract the label columns
data_mean = key_pts_values.mean()  # mean
data_std = key_pts_values.std()    # standard deviation
print('Label mean:', data_mean)
print('Label std:', data_std)

# Data augmentation: Resize, RandomCrop, GrayNormalize (grayscale + normalize), ToCHW (channel reorder)
import paddle.vision.transforms.functional as F

# Resize the image
class Resize(object):
    # Resize the input image to the given size
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, data):
        image = data[0]    # the image
        key_pts = data[1]  # the label (fovea coordinates)
        image_copy = np.copy(image)
        key_pts_copy = np.copy(key_pts)
        h, w = image_copy.shape[:2]
        new_h, new_w = self.output_size, self.output_size
        new_h, new_w = int(new_h), int(new_w)
        img = F.resize(image_copy, (new_h, new_w))
        # scale the pts, too
        key_pts_copy[::2] = key_pts_copy[::2] * new_w / w
        key_pts_copy[1::2] = key_pts_copy[1::2] * new_h / h
        return img, key_pts_copy

# Random crop
class RandomCrop(object):
    # Crop the input image at a random position
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, data):
        image = data[0]
        key_pts = data[1]
        image_copy = np.copy(image)
        key_pts_copy = np.copy(key_pts)
        h, w = image_copy.shape[:2]
        new_h, new_w = self.output_size
        top = np.random.randint(0, h - new_h)
        left = np.random.randint(0, w - new_w)
        image_copy = image_copy[top: top + new_h, left: left + new_w]
        key_pts_copy[::2] = key_pts_copy[::2] - left
        key_pts_copy[1::2] = key_pts_copy[1::2] - top
        return image_copy, key_pts_copy

# Grayscale + normalization
class GrayNormalize(object):
    # Convert the image to grayscale and scale its values to [-1, 1]
    # Standardize the label with the training mean/std
    def __call__(self, data):
        image = data[0]    # the image
        key_pts = data[1]  # the label
        image_copy = np.copy(image)
        key_pts_copy = np.copy(key_pts)
        # convert to grayscale (3 output channels)
        gray_scale = paddle.vision.transforms.Grayscale(num_output_channels=3)
        image_copy = gray_scale(image_copy)
        # scale pixel values to [-1, 1]
        image_copy = (image_copy-127.5) / 127.5
        # standardize the coordinates
        mean = data_mean  # label mean
        std = data_std    # label standard deviation
        key_pts_copy = (key_pts_copy - mean)/std
        return image_copy, key_pts_copy

# Reorder image channels
class ToCHW(object):
    # Convert the image layout from HWC to CHW
    def __call__(self, data):
        image = data[0]
        key_pts = data[1]
        transpose = T.Transpose((2, 0, 1))  # to CHW
        image = transpose(image)
        return image, key_pts

# Preprocessing pipelines built from the custom classes above
data_transform = T.Compose([
    Resize(240),      # 240 1444
    RandomCrop(224),
    GrayNormalize(),
    ToCHW(),
])
data_transform2 = T.Compose([
    Resize(240),      # 240 1444
    GrayNormalize(),
    ToCHW(),
])

# Augmentation using PaddlePaddle's built-in transforms
import paddle.vision.transforms as T
train_transform = T.Compose([
    T.Resize(size=[240,240]),           # shrink the image (original size ~1444)
    # T.CenterCrop(1440),               # crop around the image center
    T.ColorJitter(0.1, 0.1, 0.1, 0.1),  # randomly adjust brightness, contrast, saturation and hue
    T.RandomVerticalFlip(0.3),          # vertical flip with the given probability
    T.RandomHorizontalFlip(0.3),        # horizontal flip with the given probability
    T.RandomRotation(30),               # random rotation
    T.Normalize(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], data_format='HWC'),  # normalization
    T.ToTensor(),
])
val_transform=T.Compose([
    T.Resize(size=[240,240]),  # 1440
    T.Normalize(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], data_format='HWC'),
    T.ToTensor(),
])

# Custom dataset
path='常规赛:PALM眼底彩照中黄斑中央凹定位/Train/fundus_image/'
df=df.sample(frac=1)
image_list=[]
label_listx=[]
label_listy=[]
for i in range(len(df)):
    image_list.append(path+df['imgName'][i])
    label_listx.append(df['Fovea_X'][i])
    label_listy.append(df['Fovea_Y'][i])

import os
test_path='常规赛:PALM眼底彩照中黄斑中央凹定位/PALM-Testing400-Images'
test_list=[]
test_labelx=[]
test_labely=[]
list = os.listdir(test_path)  # list all entries in the folder
for i in range(0, len(list)):
    path = os.path.join(test_path, list[i])
    test_list.append(path)
    test_labelx.append(0)
    test_labely.append(0)

# Dataset class definition
class dataset(paddle.io.Dataset):
    def __init__(self,img_list,label_listx,label_listy,transform=None,transform2=None,mode='train'):
        self.image=img_list
        self.labelx=label_listx
        self.labely=label_listy
        self.mode=mode
        self.transform=transform
        self.transform2=transform2

    def load_img(self, image_path):
        img=cv2.imread(image_path,1)
        img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        return img

    def __getitem__(self,index):
        img = self.load_img(self.image[index])
        labelx = self.labelx[index]
        labely = self.labely[index]
        img_size=img.shape
        if self.transform:
            if self.mode=='train':
                img, label = self.transform([img, [labelx,labely]])
            else:
                img, label = self.transform2([img, [labelx,labely]])
        label=np.array(label,dtype='float32')
        img=np.array(img,dtype='float32')
        return img,label

    def __len__(self):
        return len(self.image)

# Train / validation / test splits
radio=0.8  # train/validation split ratio
train_list=image_list[:int(len(image_list)*radio)]
train_labelx=label_listx[:int(len(label_listx)*radio)]
train_labely=label_listy[:int(len(label_listy)*radio)]

val_list=image_list[int(len(image_list)*radio):]
val_labelx=label_listx[int(len(label_listx)*radio):]
val_labely=label_listy[int(len(label_listy)*radio):]

train_ds=dataset(train_list,train_labelx,train_labely,data_transform,data_transform2,'train')
val_ds=dataset(val_list,val_labelx,val_labely,data_transform,data_transform2,'valid')
test_ds=dataset(test_list,test_labelx,test_labely,data_transform,data_transform2,'test')

# Visualize a sample image
for i,data in enumerate(train_ds):
    img,label=data
    img=img.transpose([1,2,0])
    print(img.shape)
    plt.title(label)
    plt.imshow(img)
    if i==0:
        break

# Network definitions
class MyNet1(paddle.nn.Layer):
    def __init__(self,num_classes=2):
        super(MyNet1,self).__init__()
        self.net=paddle.vision.resnet152(pretrained=True)
        self.fc1=paddle.nn.Linear(1000,512)
        self.relu=paddle.nn.ReLU()
        self.fc2=paddle.nn.Linear(512,num_classes)

    def forward(self,inputs):
        out=self.net(inputs)
        out=self.fc1(out)
        out=self.relu(out)
        out=self.fc2(out)
        return out

class MyNet2(paddle.nn.Layer):
    def __init__(self):
        super(MyNet2, self).__init__()
        self.resnet = paddle.vision.resnet50(pretrained=True, num_classes=0)  # remove final fc; output is [?, 2048, 1, 1]
        self.flatten = paddle.nn.Flatten()
        self.linear_1 = paddle.nn.Linear(2048, 512)
        self.linear_2 = paddle.nn.Linear(512, 256)
        self.linear_3 = paddle.nn.Linear(256, 2)
        self.relu = paddle.nn.ReLU()
        self.dropout = paddle.nn.Dropout(0.2)

    def forward(self, inputs):
        y = self.resnet(inputs)
        y = self.flatten(y)
        y = self.linear_1(y)
        y = self.linear_2(y)
        y = self.relu(y)
        y = self.dropout(y)
        y = self.linear_3(y)
        y = paddle.nn.functional.sigmoid(y)
        return y

class MyNet3(paddle.nn.Layer):
    def __init__(self,num_classes=2):
        super(MyNet3,self).__init__()
        self.net=paddle.vision.resnet152(pretrained=True)
        self.fc1=paddle.nn.Linear(1000,2000)
        self.relu=paddle.nn.ReLU()
        self.dropout1 = paddle.nn.Dropout(0.2)
        self.fc2=paddle.nn.Linear(2000,num_classes)

    def forward(self,inputs):
        out=self.net(inputs)
        out=self.fc1(out)
        out=self.relu(out)
        out = self.dropout1(out)
        out=self.fc2(out)
        return out

# Custom loss function and helper class
from sklearn.metrics.pairwise import euclidean_distances
import paddle.nn as nn

# Loss function
def cal_coordinate_Loss(logit, label, alpha = 0.5):
    """
    logit: shape [batch, ndim]
    label: shape [batch, ndim]
    ndim = 2 represents coordinate_x and coordinate_y
    alpha: weight for MSELoss and 1-alpha for ED loss
    return: combined MSELoss and ED Loss for x and y, shape [batch, 1]
    """
    alpha = alpha
    mse_loss = nn.MSELoss(reduction='mean')
    mse_x = mse_loss(logit[:,0],label[:,0])
    mse_y = mse_loss(logit[:,1],label[:,1])
    mse_l = 0.5*(mse_x + mse_y)
    # print('mse_l', mse_l)
    ed_loss = []
    # print(logit.shape[0])
    for i in range(logit.shape[0]):
        logit_tmp = logit[i,:].numpy()
        label_tmp = label[i,:].numpy()
        # print('cal_coordinate_loss_ed', logit_tmp, label_tmp)
        ed_tmp = euclidean_distances([logit_tmp], [label_tmp])
        # print('ed_tmp:', ed_tmp[0][0])
        ed_loss.append(ed_tmp)
    ed_l = sum(ed_loss)/len(ed_loss)
    # print('ed_l', ed_l)
    # print('alpha', alpha)
    loss = alpha * mse_l + (1-alpha) * ed_l
    # print('loss in function', loss)
    return loss

class SelfDefineLoss(paddle.nn.Layer):
    """
    1. Subclass paddle.nn.Layer
    """
    def __init__(self):
        """
        2. Define any parameters your algorithm needs in the constructor
        """
        super(SelfDefineLoss, self).__init__()

    def forward(self, input, label):
        """
        3. Implement forward(), which receives two arguments: input and label
           - input: model outputs for a single sample or a batch
           - label: the corresponding ground-truth labels
           The return value is a Tensor holding the summed or averaged custom loss
        """
        # custom computation built from Paddle APIs
        output = cal_coordinate_Loss(input,label)
        return output

# Asynchronous data loading
train_loader = paddle.io.DataLoader(train_ds, places=paddle.CPUPlace(), batch_size=32, shuffle=True, num_workers=0)
val_loader = paddle.io.DataLoader(val_ds, places=paddle.CPUPlace(), batch_size=32, shuffle=False, num_workers=0)
test_loader = paddle.io.DataLoader(test_ds, places=paddle.CPUPlace(), batch_size=32, shuffle=False, num_workers=0)

# Model training and visualization
from utils import NME
visualdl = paddle.callbacks.VisualDL(log_dir='visual_log')

# Training configuration
Batch_size = 32  # batch size
EPOCHS = 30      # total number of training epochs
step_each_epoch = len(train_ds)//Batch_size

# Wrap the network with paddle.Model
model = paddle.Model(MyNet3())  # num_classes=2

lr = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=3e-4,  # initial learning rate
                                              # eta_min=1e-5,      # final learning rate, defaults to 0
                                              T_max=step_each_epoch * EPOCHS)

# Adam optimizer
optimizer = paddle.optimizer.Adam(learning_rate=lr,
                                  weight_decay=1e-4,  # regularization coefficient
                                  parameters=model.parameters())

# Loss (SmoothL1Loss and the custom loss are alternatives)
# loss = paddle.nn.SmoothL1Loss()
# loss = SelfDefineLoss()
loss = paddle.nn.MSELoss(reduction='mean')

# custom metric
metric = NME()

model.prepare(optimizer=optimizer, loss=loss, metrics=metric)
# model.load('/home/aistudio/work/lup/final')

# Launch the full training loop
model.fit(train_loader,            # training dataset
          val_loader,              # validation dataset
          epochs=EPOCHS,           # total number of epochs
          batch_size=Batch_size,   # batch size used during training
          save_dir="/home/aistudio/work/lup",  # folder for model/optimizer checkpoints
          save_freq=1,             # save checkpoints every N epochs
          verbose=1,               # logging verbosity
          callbacks=[visualdl])    # enable VisualDL visualization

# Model evaluation
model.load('/home/aistudio/work/lup/6')  # final
result = model.evaluate(val_loader, verbose=1)
print(result)

# Evaluate all saved checkpoints
model_path="/home/aistudio/work/lup/"
with open('recors/record.txt','w') as f:
    for i in range(30):
        model.load('/home/aistudio/work/lup/'+str(i))
        result = model.evaluate(val_loader, verbose=1)
        print('no.',i,result)
        f.write(str(i)+str(result)+'\n')

# Run prediction
result = model.predict(test_loader)

# Get test image sizes and file names
test_path='常规赛:PALM眼底彩照中黄斑中央凹定位/PALM-Testing400-Images'
test_size=[]
FileName=[]
for i in range(len(list)):
    path = os.path.join(test_path, list[i])
    img=cv2.imread(path,1)
    test_size.append(img.shape)
    FileName.append(list[i])
test_size=np.array(test_size)

# Write the submission file
result=np.array(result)
pred=[]
for i in range(len(result[0])):
    pred.extend(result[0][i])
pred=np.array(pred)
pred = paddle.to_tensor(pred)
out=np.array(pred).reshape(-1,2)

Fovea_X=out[:,0]*data_std+data_mean
Fovea_Y=out[:,1]*data_std+data_mean
Fovea_X=Fovea_X*test_size[:,1]/224
Fovea_Y=Fovea_Y*test_size[:,0]/224
submission = pd.DataFrame(data={
    "FileName": FileName,
    "Fovea_X": Fovea_X,
    "Fovea_Y": Fovea_Y
})
submission=submission.sort_values(by='FileName')
submission.to_csv("result.csv", index=False)

# Ensemble: average several submission files
import numpy as np
import blackhole.dataframe as pd
df1=pd.read_csv('result41.958.csv')
df2=pd.read_csv('result41.958.csv')
df3=pd.read_csv('result49.65447.csv')
df4=pd.read_csv('result49.75246.csv')
dfs=[df1,df2,df3,df4]
File_Name=[]
Fovea_X=[]
Fovea_Y=[]
for i in range(len(df1)):
    File_Name.append(dfs[0]['FileName'][i])
    avgx=(sum(np.array(dfs[x]['Fovea_X'][i]) for x in range(len(dfs))))/len(dfs)
    avgy=(sum(np.array(dfs[x]['Fovea_Y'][i]) for x in range(len(dfs))))/len(dfs)
    Fovea_X.append(avgx)
    Fovea_Y.append(avgy)
submission = pd.DataFrame(data={
    "FileName": File_Name,
    "Fovea_X": Fovea_X,
    "Fovea_Y": Fovea_Y
})
submission=submission.sort_values(by='FileName')
submission.to_csv("result.csv", index=False)
```
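The training step above imports `NME` from a local `utils` module that is not included in this dump. As a rough, hypothetical stand-in (not the author's or the competition's actual metric code), a normalized mean Euclidean-distance check over denormalized predictions could look like this NumPy sketch:

```
import numpy as np

def nme(pred, target, norm=224.0):
    """Hypothetical normalized mean error: the mean Euclidean distance between
    predicted and true (x, y) points, divided by a normalization length
    (here the 224-pixel crop size used above)."""
    pred = np.asarray(pred, dtype="float64")
    target = np.asarray(target, dtype="float64")
    dists = np.linalg.norm(pred - target, axis=1)  # per-sample Euclidean distance
    return float(dists.mean() / norm)

# Example with dummy coordinates
print(nme([[100.0, 120.0]], [[110.0, 115.0]]))
```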
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://covid.ourworldindata.org/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) 
confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df["covid_cases"] = \ confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers 
= [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] 
multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ ["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ 
.sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ .groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": "shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 
estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = 
confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0) result_summary_df.head(daily_plot_days) def compute_aggregated_results_summary(days) -> pd.DataFrame: aggregated_result_summary_df = result_summary_df.copy() aggregated_result_summary_df["covid_cases_for_ratio"] = \ aggregated_result_summary_df.covid_cases.mask( aggregated_result_summary_df.shared_diagnoses == 0, 0) aggregated_result_summary_df["covid_cases_for_ratio_es"] = \ aggregated_result_summary_df.covid_cases_es.mask( aggregated_result_summary_df.shared_diagnoses_es == 0, 0) aggregated_result_summary_df = aggregated_result_summary_df \ .sort_index(ascending=True).fillna(0).rolling(days).agg({ "covid_cases": "sum", "covid_cases_es": "sum", "covid_cases_for_ratio": "sum", "covid_cases_for_ratio_es": "sum", "shared_teks_by_generation_date": "sum", "shared_teks_by_upload_date": "sum", "shared_diagnoses": "sum", "shared_diagnoses_es": "sum", }).sort_index(ascending=False) with pd.option_context("mode.use_inf_as_na", True): aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int) aggregated_result_summary_df["teks_per_shared_diagnosis"] = \ (aggregated_result_summary_df.shared_teks_by_upload_date / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) 
aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=14) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary ``` ## Report Results ``` display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] ``` ### Daily Summary Table ``` result_summary_df_ = result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in 
summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() ``` ### Hourly Summary Plots ``` hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) ``` ### Publish Results ``` github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df 
= \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) ``` ### Save Results ``` report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") ``` ### Publish Results as JSON ``` def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = df.reset_index().copy() 
api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) ``` ### Publish on README ``` with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) ``` ### Publish on Twitter ``` enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Source Countries: {display_brief_source_regions} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - 
Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio: {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Spain): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids) ```
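The hourly `shared_teks_by_upload_date` count above (and the analogous daily count used for `new_tek_df`) relies on the fact that each TEK dump is cumulative: the keys uploaded in a given interval are the set difference between one dump and the previous one. A minimal toy illustration with made-up TEK identifiers (not real data), written with an explicit `shift` instead of the `.diff()`-on-sets shortcut used above:

```
import pandas as pd

# Toy cumulative TEK dumps, one row per extraction hour (made-up identifiers, not real TEKs).
toy_df = pd.DataFrame({
    "extraction_date_with_hour": ["2021-01-01@10", "2021-01-01@11", "2021-01-01@12"],
    "tek_list": [["a", "b"], ["a", "b", "c"], ["a", "b", "c", "d", "e"]],
})
toy_df = toy_df.set_index("extraction_date_with_hour").sort_index()
toy_df["tek_set"] = toy_df.tek_list.apply(set)

# Each hour's newly uploaded TEKs are the set difference against the previous
# cumulative dump, which is the same idea as the .diff() on sets used for
# hourly_new_tek_count_df above, written out explicitly here.
previous_tek_sets = toy_df.tek_set.shift(1)
toy_df["shared_teks_by_upload_date"] = [
    len(current - previous) if isinstance(previous, set) else 0
    for current, previous in zip(toy_df.tek_set, previous_tek_sets)
]
print(toy_df[["shared_teks_by_upload_date"]])  # expected counts: 0, 1, 2
```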
### This notebook is optionally accelerated with a GPU runtime. ### If you would like to use this acceleration, please select the menu option "Runtime" -> "Change runtime type", select "Hardware Accelerator" -> "GPU" and click "SAVE" ---------------------------------------------------------------------- # YOLOP *Author: Hust Visual Learning Team* **YOLOP pretrained on the BDD100K dataset** ## Before You Start To install YOLOP dependencies: ``` %%bash pip install -qr https://github.com/hustvl/YOLOP/blob/main/requirements.txt # install dependencies ``` ## YOLOP: You Only Look Once for Panoptic driving Perception ### Model Description <img width="800" alt="YOLOP Model" src="https://github.com/hustvl/YOLOP/raw/main/pictures/yolop.png"> &nbsp; - YOLOP is an efficient multi-task network that can jointly handle three crucial tasks in autonomous driving: object detection, drivable area segmentation and lane detection. And it is also the first to reach real-time on embedded devices while maintaining state-of-the-art level performance on the **BDD100K** dataset. ### Results #### Traffic Object Detection Result | Model | Recall(%) | mAP50(%) | Speed(fps) | | -------------- | --------- | -------- | ---------- | | `Multinet` | 81.3 | 60.2 | 8.6 | | `DLT-Net` | 89.4 | 68.4 | 9.3 | | `Faster R-CNN` | 77.2 | 55.6 | 5.3 | | `YOLOv5s` | 86.8 | 77.2 | 82 | | `YOLOP(ours)` | 89.2 | 76.5 | 41 | #### Drivable Area Segmentation Result | Model | mIOU(%) | Speed(fps) | | ------------- | ------- | ---------- | | `Multinet` | 71.6 | 8.6 | | `DLT-Net` | 71.3 | 9.3 | | `PSPNet` | 89.6 | 11.1 | | `YOLOP(ours)` | 91.5 | 41 | #### Lane Detection Result | Model | mIOU(%) | IOU(%) | | ------------- | ------- | ------ | | `ENet` | 34.12 | 14.64 | | `SCNN` | 35.79 | 15.84 | | `ENet-SAD` | 36.56 | 16.02 | | `YOLOP(ours)` | 70.50 | 26.20 | #### Ablation Studies 1: End-to-end v.s. Step-by-step | Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) | | --------------- | --------- | ----- | ------- | ----------- | ------ | | `ES-W` | 87.0 | 75.3 | 90.4 | 66.8 | 26.2 | | `ED-W` | 87.3 | 76.0 | 91.6 | 71.2 | 26.1 | | `ES-D-W` | 87.0 | 75.1 | 91.7 | 68.6 | 27.0 | | `ED-S-W` | 87.5 | 76.1 | 91.6 | 68.0 | 26.8 | | `End-to-end` | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 | #### Ablation Studies 2: Multi-task v.s. Single task | Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) | Speed(ms/frame) | | --------------- | --------- | ----- | ------- | ----------- | ------ | --------------- | | `Det(only)` | 88.2 | 76.9 | - | - | - | 15.7 | | `Da-Seg(only)` | - | - | 92.0 | - | - | 14.8 | | `Ll-Seg(only)` | - | - | - | 79.6 | 27.9 | 14.8 | | `Multitask` | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 | 24.4 | **Notes**: - In table 4, E, D, S and W refer to Encoder, Detect head, two Segment heads and whole network. So the Algorithm (First, we only train Encoder and Detect head. Then we freeze the Encoder and Detect head as well as train two Segmentation heads. Finally, the entire network is trained jointly for all three tasks.) can be marked as ED-S-W, and the same for others. 
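To make the step-by-step training schedules in the note above more concrete, here is a rough sketch of the ED-S-W recipe in generic PyTorch. It is illustrative only: the attribute names `encoder`, `detect_head`, `da_seg_head` and `ll_seg_head`, as well as the `train_one_epoch` callback, are placeholders and not the actual module or function names used in the YOLOP codebase.

```
def set_requires_grad(module, flag: bool):
    # Freeze or unfreeze a sub-network by toggling gradient tracking.
    for parameter in module.parameters():
        parameter.requires_grad = flag

def train_ed_s_w(model, train_one_epoch, epochs_per_stage=10):
    # Stage 1 (E + D): train only the encoder and the detection head.
    set_requires_grad(model.encoder, True)
    set_requires_grad(model.detect_head, True)
    set_requires_grad(model.da_seg_head, False)
    set_requires_grad(model.ll_seg_head, False)
    for _ in range(epochs_per_stage):
        train_one_epoch(model, losses=["detection"])

    # Stage 2 (S): freeze encoder and detection head, train the two segmentation heads.
    set_requires_grad(model.encoder, False)
    set_requires_grad(model.detect_head, False)
    set_requires_grad(model.da_seg_head, True)
    set_requires_grad(model.ll_seg_head, True)
    for _ in range(epochs_per_stage):
        train_one_epoch(model, losses=["drivable_area", "lane_line"])

    # Stage 3 (W): unfreeze everything and train the whole network jointly.
    for part in [model.encoder, model.detect_head, model.da_seg_head, model.ll_seg_head]:
        set_requires_grad(part, True)
    for _ in range(epochs_per_stage):
        train_one_epoch(model, losses=["detection", "drivable_area", "lane_line"])
```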
### Visualization

#### Traffic Object Detection Result
<img width="800" alt="Traffic Object Detection Result" src="https://github.com/hustvl/YOLOP/raw/main/pictures/detect.png">
&nbsp;

#### Drivable Area Segmentation Result
<img width="800" alt="Drivable Area Segmentation Result" src="https://github.com/hustvl/YOLOP/raw/main/pictures/da.png">
&nbsp;

#### Lane Detection Result
<img width="800" alt="Lane Detection Result" src="https://github.com/hustvl/YOLOP/raw/main/pictures/ll.png">
&nbsp;

**Notes**:

- The lane detection visualization has been post-processed with quadratic fitting.

### Deployment

Our model runs inference in real time on a **Jetson TX2**, using a **ZED camera** to capture images, and is accelerated with **TensorRT**. Code for deploying and running the model is provided in the [github code](https://github.com/hustvl/YOLOP/tree/main/toolkits/deploy).

### Load From PyTorch Hub

This example loads the pretrained **YOLOP** model and passes an image for inference.

```
import torch

# load model
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)

# inference
img = torch.randn(1, 3, 640, 640)
det_out, da_seg_out, ll_seg_out = model(img)
```

### Citation

See the [github code](https://github.com/hustvl/YOLOP) and [arxiv paper](https://arxiv.org/abs/2108.11250) for more details. If you find our paper and code useful for your research, please consider giving the project a star and citing the paper.
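As a follow-up to the Load From PyTorch Hub example above, the sketch below shows one way the outputs might be reduced to per-pixel masks. It is a hedged illustration rather than the project's official post-processing: it assumes each segmentation head returns a `(batch, num_classes, height, width)` score tensor, so the mask is an argmax over the class dimension, and it keeps the random input only to check shapes.

```
import torch

# Load the pretrained model as in the hub example above
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
model.eval()

# Dummy input; replace with a normalized 640x640 RGB image tensor for real use
img = torch.randn(1, 3, 640, 640)

with torch.no_grad():
    det_out, da_seg_out, ll_seg_out = model(img)

# Assumption: segmentation outputs are (batch, num_classes, H, W) scores,
# so the per-pixel class is the argmax over the class dimension.
da_mask = torch.argmax(da_seg_out, dim=1).squeeze(0)  # drivable-area mask, H x W
ll_mask = torch.argmax(ll_seg_out, dim=1).squeeze(0)  # lane-line mask, H x W
print(da_mask.shape, ll_mask.shape)
```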
# Introduction

## The Data Set

In today's workshop, we will revisit the data set you worked with in the Machine Learning workshop. As a refresher, this data set comes from the GSE53987 study on bipolar disorder (BD), major depressive disorder (MDD) and schizophrenia:

Lanz TA, Joshi JJ, Reinhart V, Johnson K et al. STEP levels are unchanged in pre-frontal cortex and associative striatum in post-mortem human brain samples from subjects with schizophrenia, bipolar disorder and major depressive disorder. PLoS One 2015;10(3):e0121744. PMID: 25786133

This is microarray data on platform GPL570 (HG-U133_Plus_2, Affymetrix Human Genome U133 Plus 2.0 Array) consisting of 54675 probes. The raw CEL files of the GEO series were downloaded, frozen-RMA normalized, and the probes were converted to HUGO gene symbols using the annotate package, averaging on genes. The sample clinical data (meta-data) was parsed from the series matrix file. You can download it **here**.

In total there are 205 rows from 19 individuals diagnosed with BD, 19 with MDD, 19 with schizophrenia and 19 controls. Each patient has gene expression measured in up to 3 post-mortem brain tissues. There are a total of 13768 genes (numeric features), 10 meta features and 1 ID (GEO sample accession):

- Age
- Race (W for white and B for black)
- Gender (F for female and M for male)
- Ph: pH of the brain tissue
- Pmi: post-mortem interval
- Rin: RNA integrity number
- Patient: unique ID for each patient. Each patient has up to 3 tissue samples. The patient ID is written as the disease followed by a number from 1 to 19
- Tissue: tissue the expression was obtained from
- Disease.state: class of disease the patient belongs to: bipolar, schizophrenia, depression or control
- source.name: combination of the tissue and disease.state

## Workshop Goals

This workshop will walk you through an analysis of the GSE53987 microarray data set. It has the following three tasks:

1. Visualize the demographics of the data set
2. Cluster gene expression data and appropriately visualize the cluster results
3. Compute differential gene expression and visualize the differential expression

Each task has a __required__ section and a __bonus__ section. Focus on completing the three __required__ sections first; then, if you have time at the end, revisit the __bonus__ sections. Finally, as this is your final workshop, we hope that you will use it as an opportunity to integrate the different concepts that you have learned in previous workshops.

## Workshop Logistics

As mentioned in the pre-workshop documentation, you can do this workshop either in a Jupyter Notebook or in a python script. Please make sure you have set up the appropriate environment for yourself. This workshop will be completed using "paired programming", and the "driver" will switch every 15 minutes. Also, we will be using the python plotting libraries matplotlib and seaborn.

## TASK 0: Import Libraries and Data

- Download the data set (above) as a .csv file
- Initialize your script by loading the following libraries.

```
# Import Necessary Libraries
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import cluster, metrics, decomposition
from scipy import stats  # used by the differential expression helper in Task 2
from matplotlib import pyplot as plt
import itertools

data = pd.read_csv('GSE53987_combined.csv', index_col=0)
genes = data.columns[10:]
```
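Before plotting, it can help to sanity-check what was loaded. A quick, optional sketch (nothing here is required by the tasks; the column names come from the data description above):

```
# Quick sanity checks on the loaded data frame
print(data.shape)                             # expect 205 rows
print(data['Disease.state'].value_counts())   # class balance per diagnosis
print(data['Tissue'].value_counts())          # samples per tissue
data.loc[:, genes[:5]].describe()             # summary stats for a few genes
```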
## TASK 1: Visualize Dataset Demographics

### Required Workshop Task:

##### Use the skeleton code to write 3 plotting functions:

1. plot_distribution() - Returns a distribution plot object given a dataframe and one observation
2. plot_relational() - Returns a relational plot object given a dataframe and (x,y) observations
3. plot_categorical() - Returns a categorical plot object given a dataframe and (x,y) observations

##### Use these functions to produce the following plots:

1. Histogram of patient ages
2. Histogram of gene expression for 1 gene
3. Scatter plot of gene expression for 1 gene by age
4. Scatter plot of gene expression for 1 gene by disease state

Your plots should satisfy the following critical components:
- Axis titles
- Figure title
- Legend (if applicable)
- Be readable

### Bonus Task:

1. Return to these functions and include functionality to customize color palettes, axis legends, etc. You can choose to define your own plotting "style" and keep it consistent across all of your plotting functions.
2. Facet your plots. Modify your functions to take in a "facet" argument such that, when facet is an observation, the function will create a facet grid and facet on that observation. Read more about faceting here:

Faceting generates multi-plot grids by __mapping a dataset onto multiple axes arrayed in a grid of rows and columns that correspond to levels of variables in the dataset.__
- In order to use faceting, your data __must be__ in a Pandas DataFrame and it must take the form of what Hadley Wickham calls “tidy” data.
- In brief, that means your dataframe should be structured such that each column is a variable and each row is an observation.

There are figure-level functions (e.g. relplot() or catplot()) that will create facet grids automatically and can be used in place of things like distplot() or scatterplot().

```
# Import the data (.csv file) as a data frame
data = pd.read_csv("/Users/ebriars/Desktop/Bioinformatics/BRITE REU Workshops/Data Visualization/GSE53987_combined.csv",
                   index_col=0)


# Function to Plot a Distribution
def plot_distribution(df, obs1, obs2=''):
    """
    Create a distribution plot for at least one observation

    Arguments:
        df (pandas data frame): data frame containing at least 1 column of numerical values
        obs1 (string): observation to plot the distribution of
        obs2 (string, optional): additional observation to facet the distribution on
    Returns:
        axes object
    """
    if obs2 == '':
        ax = sns.distplot(df[obs1])
    else:
        ax = sns.FacetGrid(df, hue=obs2)
        ax = ax.map(sns.distplot, obs1, hist=False)
    return ax


# Function to Plot Relational (x,y) Plots
def plot_relational(df, x, y, hue=None, kind=None):
    """
    Create a plot for an x,y relationship (default = scatter plot)
    Optional functionality for additional observations.

    Arguments:
        df (pandas data frame): data frame containing at least 2 columns of numerical values
        x (string): observation for the independent variable
        y (string): observation for the dependent variable
        hue (string, optional): additional observation to color the plot on
        kind (string, optional): type of plot to create [scatter, line]
    Returns:
        axes object
    """
    if kind is None or kind == "scatter":
        ax = sns.scatterplot(data=df, x=x, y=y, hue=hue)
    else:
        ax = sns.lineplot(data=df, x=x, y=y, hue=hue)
    return ax


def plot_categorical(df, x, y, hue=None, kind=None):
    """
    Create a plot for an x,y relationship where x is categorical (not numerical)

    Arguments:
        df (pandas data frame): data frame containing at least 2 columns of numerical values
        x (string): observation for the independent variable (categorical)
        y (string): observation for the dependent variable
        hue (string, optional): additional observation to color the plot on
        kind (string, optional): type of plot to create. Options should include at least:
            strip (default), box, and violin
    Returns:
        axes object
    """
    if kind is None or kind == "strip":
        ax = sns.stripplot(data=df, x=x, y=y, hue=hue)
    elif kind == "violin":
        ax = sns.violinplot(data=df, x=x, y=y, hue=hue)
    elif kind == "box":
        ax = sns.boxplot(data=df, x=x, y=y, hue=hue)
    return ax


def main():
    """
    Generate the following plots:
    1. Histogram of patient ages
    2. Histogram of gene expression for 1 gene
    3. Scatter plot of gene expression for 1 gene by age
    4. Scatter plot of gene expression for 1 gene by disease state
    """
```
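For reference, here is one possible way `main()` could call these helpers to produce the four required plots. It is only a sketch, not the only valid answer; the gene shown is simply the first gene column, and any name from `genes` works.

```
# One possible implementation of main() using the helpers above
gene = genes[0]  # any gene column works here

plot_distribution(data, 'Age')                                   # 1. patient ages
plt.title('Distribution of Patient Ages')
plt.show()

plot_distribution(data, gene)                                    # 2. one gene
plt.title('Expression of {}'.format(gene))
plt.show()

plot_relational(data, x='Age', y=gene, hue='Disease.state')      # 3. gene vs age
plt.title('{} Expression by Age'.format(gene))
plt.show()

plot_categorical(data, x='Disease.state', y=gene, kind='strip')  # 4. gene vs disease state
plt.title('{} Expression by Disease State'.format(gene))
plt.show()
```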
## TASK 2: Differential Expression Analysis

Differential expression analysis is a fancy way of saying, "We want to find which genes exhibit increased or decreased expression compared to a control group". Neat. Because the dataset we're working with is microarray data -- which is mostly normally distributed -- we'll be using a simple One-Way ANOVA. If, however, you were working with sequence data -- which follows a Negative Binomial distribution -- you would need more specialized tools. A helper function is provided below.

```
def differential_expression(data, group_col, features, reference=None):
    """
    Perform a one-way ANOVA across all provided features for a given grouping.

    Arguments
    ---------
    data : (pandas.DataFrame)
        DataFrame containing group information and feature values.
    group_col : (str)
        Column in `data` containing sample group labels.
    features : (list, numpy.ndarray):
        Columns in `data` to test for differential expression. Having them be
        gene names would make sense. :thinking:
    reference : (str, optional)
        Value in `group_col` to use as the reference group. Default is None,
        and the value will be chosen.

    Returns
    -------
    pandas.DataFrame
        A DataFrame of differential expression results with columns for fold
        changes between groups, maximum fold change from reference, f values,
        p values, and adjusted p-values by Bonferroni correction.
    """
    if group_col not in data.columns:
        raise ValueError("`group_col` {} not found in data".format(group_col))
    if any([x not in data.columns for x in features]):
        raise ValueError("Not all provided features found in data.")
    if reference is None:
        reference = data[group_col].unique()[0]
        print("No reference group provided. Using {}".format(reference))
    elif reference not in data[group_col].unique():
        raise ValueError("Reference value {} not found in column {}.".format(
                         reference, group_col))
    by_group = data.groupby(group_col)
    reference_avg = by_group.get_group(reference).loc[:, features].mean()
    values = []
    results = {}
    for each, index in by_group.groups.items():
        values.append(data.loc[index, features])
        if each != reference:
            key = "{}.FoldChange".format(each)
            results[key] = data.loc[index, features].mean()\
                         / reference_avg
    fold_change_cols = list(results.keys())
    fvalues, pvalues = stats.f_oneway(*values)
    results['f.value'] = fvalues
    results['p.value'] = pvalues
    results['p.value.adj'] = pvalues * len(features)  # Bonferroni correction
    results_df = pd.DataFrame(results)

    def largest_deviation(x):
        i = np.where(abs(x) == max(abs(x)))[0][0]
        return x[i]

    results_df['Max.FoldChange'] = results_df[fold_change_cols].apply(
        lambda x: largest_deviation(x.values), axis=1)
    return results_df


# Here's some pre-subsetted data
hippocampus = data[data["Tissue"] == "hippocampus"]
pf_cortex = data[data["Tissue"] == "Pre-frontal cortex (BA46)"]
as_striatum = data[data["Tissue"] == "Associative striatum"]

# Here's how we can subset a dataset by two conditions.
# You might find it useful :thinking:
data[(data["Tissue"] == 'hippocampus') & (data['Disease.state'] == 'control')]
```

### Task 2a: Volcano Plots

Volcano plots are a way to showcase the number of differentially expressed genes found during high-throughput sequencing analysis. Log fold changes are plotted along the x-axis, while p-values are plotted along the y-axis. Genes are marked significant if they exceed some absolute log fold change threshold **as well as** some p-value threshold for significance. This can be seen in the plot below.

![](https://galaxyproject.github.io/training-material/topics/transcriptomics/images/rna-seq-viz-with-volcanoplot/volcanoplot.png)

Your first task will be to generate some volcano plots:

**Requirements**
1. Use the provided function to perform an ANOVA (analysis of variance) between control and experimental groups in each tissue.
    - Perform a separate analysis for each tissue.
2. Implement the skeleton function to create a volcano plot to visualize both the log fold change in expression values and the adjusted p-values from the ANOVA
3. Highlight significant genes with distinct colors

```
def volcano_plot(data, sig_col, fc_col, sig_thresh, fc_thresh):
    """
    Generate a volcano plot showcasing differentially expressed genes.

    Parameters
    ----------
    data : (pandas.DataFrame)
        A data frame containing differential expression results
    sig_col : str
        Column in `data` with adjusted p-values.
    fc_col : str
        Column in `data` with fold changes.
    sig_thresh : float
        Threshold for statistical significance.
    fc_thresh : float
        Threshold for the absolute log2 fold change.
    """
    # note: modifies `data` in place (adds a 'significant' column and
    # log2-transforms fc_col)
    data['significant'] = False
    data[fc_col] = np.log2(data[fc_col])
    de_genes = (data[sig_col] < sig_thresh) & (data[fc_col].abs() > fc_thresh)
    data.loc[de_genes, 'significant'] = True
    ax = sns.scatterplot(x=fc_col, y=sig_col, hue='significant',
                         data=data, palette=['black', 'red'], alpha=0.75)
    linewidth = plt.rcParams['lines.linewidth'] - 1
    plt.axvline(x=fc_thresh, linestyle='--', linewidth=linewidth, color='#4D4E4F')
    plt.axvline(x=-fc_thresh, linestyle='--', linewidth=linewidth, color='#4D4E4F')
    plt.axhline(y=sig_thresh, linestyle='--', linewidth=linewidth, color='#4D4E4F')
    ax.legend().set_visible(False)
    ylabel = sig_col
    if sig_col.lower() == 'fdr':
        ylabel = 'False Discovery Rate'
    plt.xlabel(r"$log_2$ Fold Change")
    plt.ylabel(ylabel)
    for spine in ['right', 'top']:
        ax.spines[spine].set_visible(False)
    plt.tight_layout()
    return ax
```

### Task 2b: Plot the Top 1000 Differentially Expressed Genes

Clustered heatmaps are hugely popular for displaying differences in gene expression values. To reference such a plot, look back at the introductory material. Here we will plot the 1000 most differentially expressed genes for each of the analyses performed before.

**Requirements**
- Implement the skeleton function below
- Z normalize gene values
- Use a diverging and perceptually uniform colormap
- Generate plots for each of the DE results above

**Hint**: Look over all the options for [sns.clustermap()](https://seaborn.pydata.org/generated/seaborn.clustermap.html). It might make things easier.

```
def heatmap(data, genes, group_col):
    """
    Plot a clustered heatmap of expression values for the selected genes.

    Parameters
    ----------
    data : pd.DataFrame
        A (sample x gene) data matrix containing gene expression values
        for each sample.
    genes : list, str
        List of genes to plot.
    group_col : str
        Column identifying each sample's experimental group (useful for the
        bonus task below).
    """
    plot_data = data.loc[:, genes]
    ax = sns.clustermap(plot_data, cmap='RdBu_r', z_score=1)
    return ax
```

**Bonus**
There's nothing denoting which samples belong to which experimental group. Fix it.

*Bonus hint*: Look real close at the documentation.
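Putting Task 2 together, here is one possible way to run the ANOVA per tissue and feed the results into `volcano_plot`. This is a sketch: the thresholds are arbitrary illustrative values, and the column names come from the helper's documented output (`p.value.adj`, `Max.FoldChange`).

```
# Example: differential expression per tissue, then a volcano plot (sketch)
de_results = {}
for name, subset in [('hippocampus', hippocampus),
                     ('pre-frontal cortex', pf_cortex),
                     ('associative striatum', as_striatum)]:
    de_results[name] = differential_expression(
        subset, group_col='Disease.state', features=list(genes),
        reference='control')

volcano_plot(de_results['hippocampus'],
             sig_col='p.value.adj', fc_col='Max.FoldChange',
             sig_thresh=0.05, fc_thresh=1.0)   # illustrative cut-offs
plt.title('Hippocampus: disease vs. control')
plt.show()
```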
## TASK 3: Clustering Analysis

You've seen clustering in the previous machine learning workshop. Some basic plots were generated for you, including plotting the clusters on the principal components. While we can certainly do more of that, we will also be introducing two new plots: elbow plots and silhouette plots.

### Elbow Plots

Elbow plots are used to help diagnose the perennial question of K-means clustering: how do I choose K? To create the graph, you plot the number of clusters on the x-axis and some evaluation of "cluster goodness" on the y-axis. Looking at the name of the plot, you might guess that we're looking for an "elbow". This is the point in the graph where we start getting diminishing returns in performance, and specifying more clusters may lead to over-clustering the data. An example plot is shown below.

![](https://upload.wikimedia.org/wikipedia/commons/c/cd/DataClustering_ElbowCriterion.JPG)

You can see the K selected (K = 3) is right before diminishing returns start to kick in. Mathematically, this point is defined as the point at which curvature is maximized. However, the inflection point is also a decent -- though more conservative -- estimate. We'll just stick to eye-balling it for this workshop. If you would like to know how to automatically find the elbow point, more information can be found [here](https://raghavan.usc.edu/papers/kneedle-simplex11.pdf)

### Task 3a: Implement a function that creates an elbow plot

Skeleton code is provided below. The function expects a list of k-values and their associated scores. An optional "ax" parameter is also provided. This parameter should be an axes object and can be created by issuing the following command:

```ax = plt.subplot()```

While we won't need the parameter right now, we'll likely use it in the future.

**Function Requirements**
- Generate plot data by clustering the entire dataset on the first 50 principal components. Vary K values from 2 - 10.
    - While you've been supplied a helper function for clustering, you'll need to supply the principal components yourself. Refer to your machine learning workshop along with the scikit-learn [documentation](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)
- Plots each k and its associated value.
- Plots lines connecting each data point.
- Produces a plot with correctly labelled axes.

**Hint:** Working with an axis object is similar to base matplotlib, except `plt.scatter()` might become something like `ax.scatter()`.

#### Helper Function

```
def cluster_data(X, k):
    """
    Cluster data using K-Means.

    Parameters
    ----------
    X : (numpy.ndarray)
        Data matrix to cluster samples on. Should be (samples x features).
    k : int
        Number of clusters to find.

    Returns
    -------
    tuple (numpy.ndarray, float)
        A tuple where the first value is the assigned cluster labels for
        each sample, and the second value is the score associated with
        the particular clustering.
    """
    model = cluster.KMeans(n_clusters=k).fit(X)
    score = model.score(X)
    return (model.labels_, score)
```

#### Task 3a Implementation

```
def elbow_plot(ks, scores, best=None, ax=None):
    """
    Create a scatter plot to aid in choosing the number of clusters using
    K-means.

    Arguments
    ---------
    ks : (numpy.ndarray)
        Tested values for the number of clusters.
    scores: (numpy.ndarray)
        Cluster scores associated with each number K.
    ax: plt.Axes Object, optional
    """
    if ax is None:
        fig, ax = plt.subplots()
    ax.scatter(ks, scores)
    ax.plot(ks, scores)
    ax.set_xlabel("Number of Clusters")
    ax.set_ylabel("Negative Distance From Mean")
    return ax
```

Once you've created the base plotting function, you'll probably realize we have no indication of where the elbow point is. Fix this by adding another optional parameter (`best`) to your function. The parameter `best` should be the K value that produces the elbow point.

**Function Requirements**
- Add an optional parameter `best` that, if supplied, denotes the elbow point with a vertical, dashed line.
- If `best` is not supplied, the plot should still be produced but without denoting the elbow point.

**Hint**: `plt.axvline` and `plt.axhline` can be used to produce vertical and horizontal lines, respectively. More information [here](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.axvline.html)

**Note**: You are not required to have the line end at the associated score value.

```
def elbow_plot(ks, scores, best=None, ax=None):
    """
    Create a scatter plot to aid in choosing the number of clusters using
    K-means.

    Arguments
    ---------
    ks : (numpy.ndarray)
        Tested values for the number of clusters.
    scores: (numpy.ndarray)
        Cluster scores associated with each number K.
    best: int, optional
        The best value for K. Determined by the K that falls at the elbow.
        If passed, a black dashed line will be plotted to indicate the best.
        Default is no line.
    ax: plt.Axes Object, optional
    """
    if ax is None:
        fig, ax = plt.subplots()
    ax.scatter(ks, scores)
    ax.plot(ks, scores)
    ax.set_xlabel("Number of Clusters")
    ax.set_ylabel("Negative Distance From Mean")
    if best is not None:
        if best not in ks:
            raise ValueError("{} not included in provided number "
                             "of clusters.".format(best))
        idx = np.where(np.array(ks) == best)[0][0]
        ymin, ymax = ax.get_ylim()
        point = (scores[idx] - ymin) / (ymax - ymin)
        ax.axvline(x=best, ymax=point, linestyle="--", c='black')
        ax.scatter([best], scores[idx], edgecolor='black', facecolor="none",
                   s=200)
        ax.set_title("Elbow at K={}".format(best), loc='left')
    return ax
```

### Silhouette Plots

Silhouette plots are another way to visually diagnose cluster performance. They are created by finding the [silhouette coefficient](https://en.wikipedia.org/wiki/Silhouette_(clustering)) for each sample in the data, and plotting an area graph for each cluster. The silhouette coefficient measures how well-separated clusters are from each other. The value ranges from $[-1 , 1]$, where 1 indicates good separation, 0 indicates randomness, and -1 indicates mixing of clusters. An example is posted below.

![](https://scikit-plot.readthedocs.io/en/stable/_images/plot_silhouette.png)

As you can see, each sample in each cluster has the area filled from some minimal point (usually 0 or the minimum score in the dataset) and clusters are separated to produce distinct [silhouettes](https://www.youtube.com/watch?v=-TcUvXzgwMY).

### Task 3b: Implement a function to plot silhouette coefficients

Because the code to create a silhouette plot can be a little bit involved, we've created both a skeleton function with documentation, and provided the following pseudo-code:

```
- Calculate scores for each sample.
- Get a set of unique sample labels.
- Set a score minimum
- Initialize variables y_lower, and y_step
    - y_lower is the lower bound on the y-axis for the first cluster's silhouette
    - y_step is the distance between cluster silhouettes
- Initialize variable, breaks
    - breaks are the middle point of each cluster silhouette and will be used to position the axis label
- Iterate through each cluster label, for each cluster:
    - Calculate the variable y_upper by adding the number of samples
    - Fill the area between y_lower and y_upper using the silhouette scores for each sample
    - Calculate the middle point of the y distance. Append it to the variable breaks.
    - Calculate the new y_lower value
- Label axes with appropriate names and tick marks
- Create a dashed line at the average silhouette score over all samples
```

**Hint**: you might find [ax.fill_betweenx()](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.fill_betweenx.html) and [ax.set_yticks()](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.set_yticks.html?highlight=set_yticks#matplotlib.axes.Axes.set_yticks)/ [ax.set_yticklabels()](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.set_yticklabels.html?highlight=set_yticklabels#matplotlib.axes.Axes.set_yticklabels) useful.

```
def silhouette_plot(X, y, ax=None):
    """
    Plot silhouette scores for all samples across clusters.

    Parameters
    ----------
    X : numpy.ndarray
        Numerical data used to cluster the data.
    y : numpy.ndarray
        Cluster labels assigned to each sample.
    ax : matplotlib.Axes
        Axis object to plot scores onto. Default is None, and a new axis
        will be created.

    Returns
    -------
    matplotlib.Axes
    """
    if ax is None:
        ax = plt.subplot()
    scores = metrics.silhouette_samples(X, y)
    clusters = sorted(np.unique(y))
    score_min = 0
    y_lower, y_step = 5, 5
    props = plt.rcParams['axes.prop_cycle']
    colors = itertools.cycle(props.by_key()['color'])
    breaks = []
    for each, color in zip(clusters, colors):
        # Aggregate the silhouette scores for samples, sort scores for
        # area filling
        cluster_scores = scores[y == each]
        cluster_scores.sort()
        y_upper = y_lower + len(cluster_scores)
        ax.fill_betweenx(np.arange(y_lower, y_upper),
                         score_min, cluster_scores,
                         facecolor=color, edgecolor=color, alpha=0.7,
                         label=each)
        breaks.append((y_upper + y_lower) / 2)
        # Compute the new y_lower for next plot
        y_lower = y_upper + y_step
    plt.legend()
    ax.set_xlabel("Silhouette Coefficient")
    ax.set_ylabel("Cluster")
    # Vertical line for threshold
    ax.set_yticks(breaks)
    ax.set_yticklabels(clusters)
    ax.axvline(x=0, linestyle="-", linewidth=2, c='black')
    ax.axvline(x=np.mean(scores), linestyle='--', c='black')
    plt.tight_layout()
    return ax
```

### Task 3C: Put it all together!

**Requirements**
- Create a function `cluster_and_plot` that will cluster a provided dataset for a range of k-values
- The function should return a single figure with two subplots:
    - An elbow plot with the "best" K value distinguished
    - A silhouette plot associated with the clustering determined by the provided K value.
- Appropriate axes labels

**Hint**: You will likely find [plt.subplots()](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.subplots.html?highlight=subplots#matplotlib.pyplot.subplots) useful.

```
def cluster_and_plot(X, best=3, kmax=10):
    """
    Cluster samples using KMeans and display the results.

    Results are displayed in a (1 x 2) figure, where the first subplot is
    an elbow plot and the second subplot is a silhouette plot.

    Parameters
    ----------
    X : (numpy.ndarray)
        A (sample x features) data matrix used to cluster samples.
    best : int, optional
        Final value of K to use for K-Means clustering. Default is 3.
    kmax : int, optional
        Maximum number of clusters to plot in the elbow plot. Default is 10.

    Returns
    -------
    matplotlib.Figure
        Clustering results.
    """
    fig, axes = plt.subplots(nrows=1, ncols=2)
    scores = np.array([cluster_data(X, k)[1] for k in np.arange(1, kmax + 1)])
    y, score = cluster_data(X, best)
    elbow_plot(np.arange(1, kmax + 1), scores, best, axes[0])
    silhouette_plot(X, y, axes[1])
    return fig
```
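To tie Task 3 back to the data, here is one way the pieces might be wired together: project the gene expression matrix onto its first 50 principal components and hand the result to `cluster_and_plot`. This is only a sketch; `best=4` is a placeholder and should come from reading the elbow plot yourself.

```
# Project the expression values onto the first 50 principal components,
# then run the clustering + plotting pipeline defined above.
X = data.loc[:, genes].values              # (samples x genes)
pca = decomposition.PCA(n_components=50)
X_pcs = pca.fit_transform(X)               # (samples x 50)

fig = cluster_and_plot(X_pcs, best=4, kmax=10)   # best=4 is only illustrative
fig.set_size_inches(10, 4)
plt.show()
```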
```
import numpy as np
import pandas as pd
import math
from xml.dom import minidom
from xml.etree import cElementTree as ElementTree
import os
import nltk
import pickle
import csv
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
import timeit

# Ranked Retrieval and Document Vectorization

def documentReader(path, queries = False):
    """
    Read the raw XML documents (or queries) found in `path`.

    :param path: folder (relative to the working directory) containing the XML files
    :param queries: if True, the files are queries and no title element is read
    :return: dictionary mapping each document/query id to its raw text
    """
    documents_path = os.path.join(os.getcwd(), path)
    documentos = {}
    for filename in os.listdir(documents_path):
        file_path = os.path.join(documents_path, filename)
        xmldoc = minidom.parse(file_path)
        id = xmldoc.getElementsByTagName('public')[0].attributes['publicId'].value
        title = '' if queries else xmldoc.getElementsByTagName('fileDesc')[0].attributes['title'].value
        data = next(ElementTree.parse(file_path).iter('raw')).text
        documentos[id] = (title + ' ' + data).replace(u'\xa0', u' ').replace('\n', ' ')
    return documentos

documentos = documentReader('docs/docs-raw-texts')
NRO_DOCS = len(documentos)
DOCS_IDs = list(documentos.keys())
print(list(documentos.items())[0])

def tokenization(documentos):
    """
    Tokenize, remove English stop words and lemmatize every document.

    :param documentos: dictionary mapping ids to raw text
    :return: dictionary mapping ids to lists of lemmatized tokens
    """
    nltk_stop_words_en = set(nltk.corpus.stopwords.words("english"))
    wordnet_lemmatizer = nltk.stem.WordNetLemmatizer()
    word_tok = {key: nltk.word_tokenize(doc) for key, doc in documentos.items()}
    word_tok_sw = {key: [token for token in doc
                         if token.isalnum() and token not in nltk_stop_words_en]
                   for key, doc in word_tok.items()}
    nltk_lemmaList = {key: [wordnet_lemmatizer.lemmatize(word) for word in doc]
                      for key, doc in word_tok_sw.items()}
    return nltk_lemmaList

tokenized_docs = tokenization(documentos)
print(list(tokenized_docs.items())[0])

def indexReader():
    """
    Reads the inverted index created in the distributed_inverted_index.ipynb notebook.
    The path from where it reads the file is docs/inverted_index.pkl

    :return: the inverted index, represented as a dictionary where the keys are the
        terms and each value is a dictionary containing the frequency of documents
        that contain the term and the posting list. The posting list is an array
        that contains the document id and the term's frequency in that document:
        {'term': {'freq': df, 'posting': [[doc1, tf1], [doc2, tf2], ..., [docn, tfn]]}}
    """
    with open('docs/inverted_index.pkl', 'rb') as index:
        return pickle.load(index)

invertedIndex = indexReader()
print(invertedIndex['William'])

def tfidfWeightedVector(invertedIndex):
    """
    Build the tf-idf weighted term-document matrix from the inverted index.

    :param invertedIndex: the inverted index computed previously
    :return: a |V| x |D| matrix, where |V| is the vocabulary size and |D| is the
        number of documents; each row is the tf-idf representation of one term.
        Also returns the list of terms (the row index).
    """
    weightedVectorMatrix = []
    index = []
    for term, term_dict in invertedIndex.items():
        weighted_vector = np.zeros(NRO_DOCS)
        freq = term_dict['freq']
        index.append(term)
        for id, t_freq in term_dict['posting']:
            tfidf = np.log(1 + t_freq) * np.log10(NRO_DOCS / freq)
            weighted_vector[id - 1] = tfidf
        weightedVectorMatrix.append(weighted_vector)
    weighted_vector_df = pd.DataFrame.from_records(data=weightedVectorMatrix, index=index, columns=DOCS_IDs)
    return weighted_vector_df, index

weighted_vector_df, term_index = tfidfWeightedVector(invertedIndex)
weighted_vector_df.tail()

print(f'tf-idf matrix of dimension {weighted_vector_df.shape}')

def norma(v):
    # Euclidean norm of a flat vector
    suma = sum(v[i]**2 for i in range(len(v)))
    return math.sqrt(suma)

def dot_product(v1, v2):
    # dot product of a (1 x n) row vector and an (n x 1) column vector
    product = sum(v1[0][i] * v2[i][0] for i in range(len(v2)))
    return product

def cosine_Similarity(doc_vec1, doc_vec2):
    return (dot_product(doc_vec1, doc_vec2)) / (norma(doc_vec1.flatten()) * norma(doc_vec2.flatten()))

def cosine_Similarity_normQ(query, doc):
    # cosine similarity when the query vector is already unit-normalized
    return dot_product(query, doc) / norma(doc.flatten())

# Work through a small example by hand to check these functions

# From here (timed section)
start_time = timeit.default_timer()

queries = documentReader('docs/queries-raw-texts', True)
print(list(queries.items())[0])

tokenized_queries = tokenization(queries)
print(list(tokenized_queries.items())[0])

def vectorize_queries(queries, term_index):
    """
    Vectorize the queries so that each one is a unit vector with the same
    dimension as the vocabulary found in the document corpus.

    :param queries: dictionary of key, value pairs where the key is the id of each
        query and the value is the tokenization of that query.
    :param term_index: list of vocabulary terms (the row index of the tf-idf matrix).
    :return: the list of query vectors and the list of query ids.
    """
    vector_queries = []
    queries_index = []
    for id, query in queries.items():
        queries_index.append(id)
        query_vector = np.zeros(len(term_index))  # zero vector of dimension |V|
        len_query = len(query)
        for term in query:
            try:
                index = term_index.index(term)
                query_vector[index] = 1 / math.sqrt(len_query)  # weight this term's dimension so the query vector has unit norm
            except ValueError:
                print(f'The term "{term}" of query {id} does not appear in the documents')
        vector_queries.append(query_vector)
    return vector_queries, queries_index

vector_queries, queries_index = vectorize_queries(tokenized_queries, term_index)
# vector_queries[0][:1000]

matrix_queries = pd.DataFrame.from_records(data=vector_queries, index=queries_index, columns=term_index)
matrix_queries.head()

print(matrix_queries.iloc[1].sum())

def getCosineSimilarity(queries, documents, query_index, docs_index):
    """
    Take the tf-idf matrix of the documents and the query matrix and compute the
    similarity matrix, using cosine similarity as the metric.

    :param queries: |Q| x |V| matrix where |V| is the vocabulary size and |Q| the
        number of queries. Each row vector is a unit vector with 0 in every column
        whose term is not part of the query represented by that row.
    :param documents: |V| x |D| matrix, where |D| is the number of documents. Each
        row is the tf-idf representation of a term.
    :param query_index: list with the ids of the queries, i.e. ['q01', 'q02', ...]
    :param docs_index: list with the ids of the docs, i.e. ['d01', 'd02', ...]
    :return: the similarity matrix between the queries and the documents
    """
    similarity_matrix = []
    for query in query_index:
        print(query, end=' - ')
        row_query = queries.loc[[query]].values
        query_doc_sim = []
        for document in docs_index:
            col_document = documents[[document]].values
            cos_sim = cosine_Similarity_normQ(row_query, col_document)
            query_doc_sim.append(cos_sim)
        similarity_matrix.append(query_doc_sim)
    return similarity_matrix

similarity_matrix = getCosineSimilarity(matrix_queries, weighted_vector_df, queries_index, DOCS_IDs)

print(len(similarity_matrix))
```

## Save cosine similarity Matrix

```
with open('docs/cos_sim_matrix', 'wb') as picklefile:
    pickle.dump(similarity_matrix, picklefile)
```

## Read cosine similarity Matrix

```
with open('docs/cos_sim_matrix', 'rb') as matrix:
    similarity_matrix = pd.DataFrame.from_records(pickle.load(matrix), index=queries_index, columns=DOCS_IDs)
similarity_matrix.head(10)
```

### Retrieve ordered docs per query

```
def retrieve_docs(similarity_matrix, query_index):
    """
    For each query, use the cosine similarities to return the list of relevant
    document ids ordered by decreasing similarity.

    :param similarity_matrix: cosine similarity matrix computed previously
    :param query_index: list with the ids of the queries, i.e. ['q01', 'q02', ...]
    :return: two dictionaries keyed by query id: one with the ordered list of
        relevant documents, and one that also keeps the similarity scores.
    """
    results = {}
    results_with_scores = {}
    for query in query_index:
        order = similarity_matrix.loc[[query]].sort_values(by=query, axis=1, ascending=False, inplace=False)
        relevant = order.loc[:, (order != 0).any(axis=0)]
        results[query] = relevant.columns.values.tolist()
        results_with_scores[query] = relevant.to_dict(orient='list')
    return results, results_with_scores

results, results_with_scores = retrieve_docs(similarity_matrix, queries_index)
print(results['q01'][:5])

# Up to here (timed section)
stop_time = timeit.default_timer()
print(stop_time - start_time)

def writeScoreFile(RRDV):
    """
    Writes RRDV-queries_results.tsv, which contains the score of each query.

    :param RRDV: dictionary with the scores,
        {'qYY': {'dXXX': score1, 'dXXX': score2, ..., 'dXXX': score3}}
    """
    file_path = os.path.join(os.getcwd(), 'docs/answer_files/RRDV-queries_results.tsv')
    with open(file_path, 'wt') as out_file:
        tsv_writer = csv.writer(out_file, delimiter='\t')
        for query_id, scores in RRDV.items():
            scores_list = ""
            for doc, score in scores.items():
                scores_list += doc + ":" + str(round(score[0], 4)) + ","
            tsv_writer.writerow([query_id, scores_list[:-1]])

writeScoreFile(results_with_scores)
print("File written")
```

## Evaluation

```
def read_judgemnts_file():
    """
    Read the judges' relevance file.

    :return: dictionary of key: value pairs, where the key is the id of each query
        and the value is another dictionary with the ids of the documents judged
        relevant for that query, sorted in increasing order.
    """
    document_path = os.path.join(os.getcwd(), 'docs/relevance-judgments.tsv')
    tsv_file = open(document_path)
    read_tsv = csv.reader(tsv_file, delimiter="\t")
    relevance = {}
    for row in read_tsv:
        documents = row[1].split(',')
        query_relevance = {pair.split(':')[0]: pair.split(':')[1] for pair in documents}
        query_relevance = dict(sorted(query_relevance.items(), key=lambda item: item[0]))
        relevance[row[0]] = query_relevance
    return relevance

relevance = read_judgemnts_file()
print(relevance['q01'])

def make_binary_result(results, relevant_res):
    """
    Take the raw query results (for each query, the list of documents ordered by
    relevance) and return three representations of them. The first is the binary
    representation at K, which has the same length as the number of judged relevant
    documents. The second is the same list but on the scale given by the evaluation
    file. The third is meant for the MAP computation: it keeps the binary
    representation until all relevant documents have appeared (or all documents
    have been listed) and, as its second component, the number of relevant documents
    that should appear in the results according to the evaluation file.

    :param results: dictionary with the raw results of each query,
        e.g. {'q01': ['d254', 'd016', 'd153', ...]}
    :param relevant_res: relevance judgments per query (from read_judgemnts_file)
    :return: the three representations described above
    """
    bin_relevant = {}
    rel_scale_repr = {}
    map_relevant_docs = {}
    for query, relevant_docs in relevant_res.items():
        bin_repr = []
        scaled_repr = []
        map_repr = []
        M = len(relevant_docs)
        for doc_id, rel_scale in relevant_docs.items():
            bin = 1 if doc_id in results[query][:M] else 0
            bin_repr.append(bin)
            scaled_repr.append(bin * int(rel_scale))
        i = 0
        for doc_id in results[query]:
            if i < M:
                map_bin = 1 if doc_id in relevant_res[query] else 0
                i += map_bin
                map_repr.append(map_bin)
        bin_relevant[query] = bin_repr
        rel_scale_repr[query] = scaled_repr
        map_relevant_docs[query] = [map_repr, M]
    return bin_relevant, rel_scale_repr, map_relevant_docs

bin_results, scaled_results, map_relevant_docs = make_binary_result(results, relevance)
print(bin_results['q01'])
print(scaled_results['q01'])

print('First 5 documents returned as relevant for q01: \n', results['q01'][:5])
print('Documents judged relevant for q01: \n', relevance['q01'])
print('Binary representation of q01, up to the last relevant doc: \n', map_relevant_docs['q01'])
```

### Definition of IR metrics functions

```
def precision_at_k(relevance: list, k: int):
    """
    Precision of the first k results.

    :param relevance: binary relevance vector of the returned documents
    :param k: cut-off rank
    :return: fraction of the first k results that are relevant
    """
    if k == 0:
        return 0
    l = np.array(relevance[:k]).sum() / k
    return l

def recall_at_k(relevance: list, nr_relevant: int, k: int):
    """
    Recall of the first k results.

    :param relevance: binary relevance vector of the returned documents
    :param nr_relevant: total number of relevant documents for the query
    :param k: cut-off rank
    :return: fraction of the relevant documents found in the first k results
    """
    l = np.array(relevance[:k]).sum() / nr_relevant
    return l

def average_precision(relevance):
    """
    Average precision of one query.

    :param relevance: pair [binary relevance vector, number of relevant documents]
    :return: average of precision@k over the ranks of the relevant documents
    """
    length = len(relevance[0])
    sum = 0
    for i in range(length):
        if relevance[0][i]:
            sum += precision_at_k(relevance[0], i + 1)
    if np.array(relevance[0]).sum() == 0:
        return 0
    else:
        return sum / relevance[1]

def mean_avg_precision(l):
    """
    Mean average precision over a list of queries.

    :param l: list of [binary relevance vector, number of relevant documents] pairs
    :return: the mean of the average precision of each query
    """
    mean = np.array([average_precision(lista) for lista in l]).mean()
    return mean

mean_avg_precision([[[0, 1, 0, 1, 1, 1, 1], 5]])

def dcg_at_k(relevance, k: int):
    """
    Compute the DCG at K of a relevance vector representing the results
    returned for a query.

    :param relevance: binary or graded relevance vector
    :return: DCG at K of the query
    """
    sum = 0
    i = 0
    for rel_i in relevance[:k]:
        i += 1
        sum += rel_i / np.log2(max(i, 2))
    return sum

dcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6)

def ndcg_at_k(relevance, k):
    """
    Compute the NDCG at K of a relevance vector, i.e. the DCG normalized by
    the DCG of the ideally ordered results.

    :return: NDCG at K.
    """
    rel_sorted = sorted(relevance, reverse=True)
    ideal = dcg_at_k(rel_sorted, k)
    real = dcg_at_k(relevance, k)
    return real / ideal

ndcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6)

print(recall_at_k(bin_results['q01'], 3, 3))
```

## Compute Evaluation Metrics for each query

```
def evaluation_metric(bin_queries, query_index, scaled_results):
    """
    :param bin_queries: dictionary of {query key: vector} values, where the vector
        is the binary representation of the results found for a query relative to
        those given in the evaluation file. E.g. for q01 the relevant documents are
        d186, d254, d016; the RRDV returns d254, d016, d153; therefore the binary
        representation of q01, in the order of the evaluation file, is [0, 1, 1].
    :param query_index: list with the ids of the queries. ['q01', 'q02', ...]
    :param scaled_results: scaled representation of the query results using the
        scale given in the evaluation file. E.g. q01 goes from [0, 1, 1] to [0, 5, 5].
    :return: a dataframe with P@M, R@M and NDCG@M for each query
    """
    COLUMNS = ['P@M', 'R@M', 'NDCG@M']
    records = []
    for query, bin_vec in bin_queries.items():
        scaled = scaled_results[query]
        M = len(bin_vec)
        pm = precision_at_k(bin_vec, M)
        rm = recall_at_k(bin_vec, M, M)
        ndcg = ndcg_at_k(scaled, M)
        records.append([pm, rm, ndcg])
    return pd.DataFrame.from_records(records, index=query_index, columns=COLUMNS)

metrics = evaluation_metric(bin_results, queries_index, scaled_results)
metrics
```

### MAP

```
def overall_map(map_relevant_docs):
    """
    Compute the MAP of the query results.

    :param map_relevant_docs: binary vectors of the queries, built so that all
        relevant documents appear
    :return: the mean average precision of the query results
    """
    matrix = [vector for key, vector in map_relevant_docs.items()]
    return mean_avg_precision(matrix)

print(f'MAP over all queries: {overall_map(map_relevant_docs)}')
```
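### Optional: vectorized cosine similarity

As an aside, the nested loops in `getCosineSimilarity` can be replaced by a single matrix product, because the query vectors are already unit-normalized: cos(q, d) = (q · d) / ||d||. A minimal sketch reusing `matrix_queries`, `weighted_vector_df`, `queries_index` and `DOCS_IDs` defined above (the result should match the loop-based matrix up to floating-point error):

```
import numpy as np
import pandas as pd

Q = matrix_queries.values                 # (|Q| x |V|) unit-norm query vectors
D = weighted_vector_df.values             # (|V| x |D|) tf-idf term-document matrix
doc_norms = np.linalg.norm(D, axis=0)     # one norm per document column
doc_norms[doc_norms == 0] = 1.0           # guard against empty documents
sim = (Q @ D) / doc_norms                 # (|Q| x |D|) cosine similarities
similarity_matrix_fast = pd.DataFrame(sim, index=queries_index, columns=DOCS_IDs)
similarity_matrix_fast.head()
```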
github_jupyter
import numpy as np import pandas as pd import math from xml.dom import minidom from xml.etree import cElementTree as ElementTree import os import nltk import pickle import csv nltk.download('stopwords') nltk.download('punkt') nltk.download('wordnet') import timeit # Ranked Retrieval and Document Vectorization def documentReader(path, queries = False): """ DocString :return: Nothing """ documents_path = os.path.join(os.getcwd(), path) documentos = {} for filename in os.listdir(documents_path): file_path = os.path.join(documents_path, filename) xmldoc = minidom.parse(file_path) id = xmldoc.getElementsByTagName('public')[0].attributes['publicId'].value title = '' if queries else xmldoc.getElementsByTagName('fileDesc')[0].attributes['title'].value data = next(ElementTree.parse(file_path).iter('raw')).text documentos[id] = (title + ' ' + data).replace(u'\xa0', u' ').replace('\n', ' ') return documentos documentos = documentReader('docs/docs-raw-texts') NRO_DOCS = len(documentos) DOCS_IDs = list(documentos.keys()) print(list(documentos.items())[0]) def tokenization(documentos): """ :param documentos: :return: """ nltk_stop_words_en = set(nltk.corpus.stopwords.words("english")) wordnet_lemmatizer = nltk.stem.WordNetLemmatizer() word_tok = {key: nltk.word_tokenize(doc) for key, doc in documentos.items()} word_tok_sw = {key: [token for token in doc if token.isalnum() and token not in nltk_stop_words_en] for key, doc in word_tok.items()} nltk_lemmaList = {key: [wordnet_lemmatizer.lemmatize(word) for word in doc] for key, doc in word_tok_sw.items()} return nltk_lemmaList tokenized_docs = tokenization(documentos) print(list(tokenized_docs.items())[0]) def indexReader(): """ Reads the inverted index created in the distributed_inverted_index.ipynb notebook The path from where it reads the file is docs/inverted_index.pkl :return: the inverted index, represented in a dictionary where the keys are the terms and the values is also a dictionary that contains the frecuency of documents that contain the term, and the posting. The posting is an array itself that contains the document id, and the term's frequency in that document. {'term': {'freq': df, 'posting':[[doc1, tf1],[doc2, tf2],...,[docn, tfn]}} """ with open('docs/inverted_index.pkl', 'rb') as index: return pickle.load(index) invertedIndex = indexReader() print(invertedIndex['William']) def tfidfWeightedVector(invertedIndex): """ Recibe el inverted index y lo usa para crear el weighted vector :param invertedIndex: Inverted index calculada antes :return: Matriz |V|x|D|. Donde |V| es el tamaño de mi vocabulario y |D| es el número de documentos. 
Cada fila es la representación en tfidf de un término """ weightedVectorMatrix = [] index = [] for term, term_dict in invertedIndex.items(): weighted_vector = np.zeros(NRO_DOCS) freq = term_dict['freq'] index.append(term) for id, t_freq in term_dict['posting']: tfidf = np.log(1 + t_freq) * np.log10(NRO_DOCS / freq) weighted_vector[ id - 1] = tfidf weightedVectorMatrix.append(weighted_vector) weighted_vector_df = pd.DataFrame.from_records(data=weightedVectorMatrix, index=index, columns=DOCS_IDs) return weighted_vector_df, index weighted_vector_df, term_index = tfidfWeightedVector(invertedIndex) weighted_vector_df.tail() print(f'Matriz tfidf de dimension {weighted_vector_df.shape}') def norma(v): suma = sum(v[i]**2 for i in range(len(v))) return math.sqrt(suma) def dot_product(v1, v2): product = sum( v1[0][i]*v2[i][0] for i in range(len(v2)) ) return product def cosine_Similarity(doc_vec1, doc_vec2): # print('.') return (dot_product(doc_vec1, doc_vec2)) / (norma(doc_vec1.flatten()) * norma(doc_vec2.flatten())) def cosine_Similarity_normQ(query, doc): return dot_product(query, doc) / norma(doc.flatten()) # HAcer ejemplo a mano a ver si sirve #Desde acá start_time = timeit.default_timer() queries = documentReader('docs/queries-raw-texts', True) print(list(queries.items())[0]) tokenized_queries = tokenization(queries) print(list(tokenized_queries.items())[0]) def vectorize_queries(queries, term_index): """ Vectoriza las queries tal forma que cada una sea un vector unitario con la misma dimensión del tamaño de vocabulario encontrado en el corpus de los documentos. :param queries: Diccionario con pares key, value. Donde key es el id de cada query y el value es la tokenización de dicha query. :param term_index: Lista con los ids de las queries. i.e. ['q01', 'q02', ...] :return: """ vector_queries = [] queries_index = [] for id, query in queries.items(): queries_index.append(id) query_vector = np.zeros(len(term_index)) #Vector de ceros de dimensión V len_query = len(query) for term in query: try: index = term_index.index(term) query_vector[index] = 1 / math.sqrt(len_query) #Pone en 1 la dimensión del vector correpondiente al termino en term except: print(f'El término "{term}" de la query {id} no está en los docs') vector_queries.append(query_vector) return vector_queries, queries_index vector_queries, queries_index = vectorize_queries(tokenized_queries, term_index) # vector_queries[0][:1000] matrix_queries = pd.DataFrame.from_records(data=vector_queries, index=queries_index, columns=term_index) matrix_queries.head() print(matrix_queries.iloc[1].sum()) def getCosineSimilarity(queries, documents, query_index, docs_index): """ Recibe la matriz tfidf de documentos y la de queries y saca las matriz de similitud usando coseno como métrica de similitud :param queries: Matriz |Q|x|V| donde |V| es el tamaño del vocabulario y |Q| el número de queries. Cada vector-fila es unitario y tiene 0 en todoas las columnas con términos que no hacen parte de la query que representa la fila en cuestión. :param documents: Matriz |V|x|D|, Donde |D| es el número de documentos. Cada fila es la representación tfidf de un término. :param query_index: Lista con los ids de las queries. i.e. ['q01', 'q02', ...] :param docs_index: Lista con los ids de los docs. i.e. ['d01', 'd02', ...] 
:return:Retorna la matriz de similitud entre las queries y los documentos """ similarity_matrix = [] for query in query_index: print(query, end=' - ') row_query = queries.loc[[query]].values query_doc_sim = [] for document in docs_index: col_document = documents[[document]].values cos_sim = cosine_Similarity_normQ(row_query, col_document) query_doc_sim.append(cos_sim) similarity_matrix.append(query_doc_sim) return similarity_matrix similarity_matrix = getCosineSimilarity(matrix_queries, weighted_vector_df, queries_index, DOCS_IDs) print(len(similarity_matrix)) with open('docs/cos_sim_matrix', 'wb') as picklefile: pickle.dump(similarity_matrix,picklefile) with open('docs/cos_sim_matrix', 'rb') as matrix: similarity_matrix = pd.DataFrame.from_records(pickle.load(matrix), index=queries_index, columns=DOCS_IDs) similarity_matrix.head(10) def retrieve_docs(similarity_matrix, query_index): """ Para cuada query se aplica el método de cosine similarity y se devuelve una lista ordenada con ids de docs relevantes para esa query ordenados por el número del id. :param similarity_matrix: Cosine similarity matrix calculada previamente :param query_index: Lista con los ids de las queries. i.e. ['q01', 'q02', ...] :return: El diccionario con pares key, value. Key = id del query. Value lista con resultados relevantes de docs :según el cosine similarity. """ results = {} results_with_scores = {} for query in query_index: order = similarity_matrix.loc[[query]].sort_values(by=query, axis=1, ascending=False, inplace=False) relevant = order.loc[:, (order != 0 ).any(axis=0)] results[query] = relevant.columns.values.tolist() results_with_scores[query] = relevant.to_dict(orient='list') return results, results_with_scores results, results_with_scores = retrieve_docs(similarity_matrix, queries_index) print(results['q01'][:5]) # Hasta acá stop_time = timeit.default_timer() print(stop_time - start_time) def writeScoreFile(RRDV): """ Writes the RRI-queries_results.tsv that contains the score of each query :param RRDV: Dictionary with the scores, {'qYY': {'dXXX': score1, 'dXXX': score2, ..., 'dXXX': score3 } } """ file_path = os.path.join(os.getcwd(), 'docs/answer_files/RRDV-queries_results.tsv') with open(file_path, 'wt') as out_file: tsv_writer = csv.writer(out_file, delimiter='\t') for query_id, scores in RRDV.items(): scores_list = "" for doc, score in scores.items(): scores_list+= doc +":"+str(round(score[0],4))+"," tsv_writer.writerow([query_id,scores_list[:-1]]) writeScoreFile(results_with_scores) print("Archivo escrito") def read_judgemnts_file(): """ Lee el archivo de relevancia de los jueces :return: Diccionario con pares key: value, donde el key es el id de cada query y el value es otro doccionario con las ids de los docs relevantes para esa query ordenados en forma creciente. """ document_path = os.path.join(os.getcwd(), 'docs/relevance-judgments.tsv') tsv_file = open(document_path) read_tsv = csv.reader(tsv_file, delimiter="\t") relevance = {} for row in read_tsv: documents = row[1].split(',') query_relevance = {pair.split(':')[0] : pair.split(':')[1] for pair in documents } query_relevance = dict(sorted(query_relevance.items(), key=lambda item: item[0])) relevance[row[0]] = query_relevance return relevance relevance = read_judgemnts_file() print(relevance['q01']) def make_binary_result(results, relevant_res): """ Este método toma los resultados crudos obtenidos para las queries (Para cada query la lista de documentos ordenaos por relevancia), devuelve 3 representaciones de estos resultados. 
La primera es la representacion binaria at K. Que es del mismo tamaño que el número de documentos relevantes. La segunda es esta misma lista pero con la escala dada por el archivo de evaluación. La tercera está destinada al cálculo del MAP, tiene la representación binaria hasta que salgan todos los documentos relevantes o simplemente de todos los documentos, además en su segundo componente tiene el número de documentos relevantes que deberían salir en los resultados según el archivo de evaluación. :param results: Diccionario con resultados crudos de cada query. Ej: {'q01': ['d254', 'd016', 'd153', ...]} :param relevant_res: Las 3 representaciones antes mencionadas :return: """ bin_relevant = {} rel_scale_repr = {} map_relevant_docs = {} for query, relevant_docs in relevant_res.items(): bin_repr = [] scaled_repr = [] map_repr = [] M = len(relevant_docs) for doc_id, rel_scale in relevant_docs.items(): bin = 1 if doc_id in results[query][:M] else 0 bin_repr.append(bin) scaled_repr.append(bin * int(rel_scale)) i = 0 for doc_id in results[query]: if i < M: map_bin = 1 if doc_id in relevant_res[query] else 0 i += map_bin map_repr.append(map_bin) bin_relevant[query] = bin_repr rel_scale_repr[query] = scaled_repr map_relevant_docs[query] = [map_repr, M] return bin_relevant, rel_scale_repr, map_relevant_docs bin_results, scaled_results, map_relevant_docs = make_binary_result(results, relevance) print(bin_results['q01']) print(scaled_results['q01']) print('Primeros 5 documentos devueltos como relevantes para q01: \n', results['q01'][:5]) print('Documentos relevantes para q01 según jueces: \n' , relevance['q01']) print('Representación binaria de q01, hasta el último doc relevante: \n' ,map_relevant_docs['q01']) def precision_at_k(relevance: list, k: int): """ DocString :return: Nothing """ if k == 0: return 0 l = np.array(relevance[:k]).sum()/k return l def recall_at_k(relevance: list, nr_relevant: int, k: int): """ DocString :return: Nothing """ l = np.array(relevance[:k]).sum()/nr_relevant return l def average_precision(relevance): """ DocString :return: Nothing """ length = len(relevance[0]) sum = 0 for i in range(length): if relevance[0][i]: sum += precision_at_k(relevance[0], i+1) if np.array(relevance[0]).sum()==0: return 0 else: return sum / relevance[1] def mean_avg_precision(l): """ DocString :return: Nothing """ mean = np.array([ average_precision(lista) for lista in l]).mean() return mean mean_avg_precision([[[0, 1, 0, 1, 1, 1, 1], 5]]) def dcg_at_k(relevance, k: int): """ Calcula el DCG at K de un vector binario representando los resultados relevantes para una query. :param relevance: Vector binario :return: DCG at K de nuestra query """ sum = 0 i = 0 for rel_i in relevance[: k]: i+= 1 sum += rel_i/np.log2(max(i, 2)) return sum dcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6) def ndcg_at_k(relevance, k): """ Calcula el ndcg at k de un vector binario :return: NDCG at K. """ rel_sorted = sorted(relevance, reverse=True) max = dcg_at_k(rel_sorted, k) real = dcg_at_k(relevance, k) return real/ max ndcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6) print(recall_at_k(bin_results['q01'], 3, 3)) def evaluation_metric(bin_queries, query_index, scaled_results): """ :param bin_queries: Diccionario con valores {query Key: vector}, donde el vector corresponde a una lista con la representación binaria de un de los resultados encontrados para una query con relación a los dados en el archivo de evaluación. Ej, para q01, los relevantes son: d186,d254,d016. El RRDV devuelve d254, d016, d153. 
Por ende, la representación binaria de q01, en el orden del archivo de evaluación es: [0, 1, 1] :param query_index: Lista con los ids de las queries. ['qo1', 'qo2', ...] :param scaled_results: Representación escalada de los resultados de las queries usando la escala dada en el archivo de evaluación. Ej, q01 pasa de [0, 1, 1] a [0, 5, 5] :return: Un dataframe con el cálculo del P@M, r@M y NDCG@M para cada query """ COLUMNS = ['P@M', 'R@M', 'NDCG@M'] records = [] for query, bin_vec in bin_queries.items(): scaled = scaled_results[query] M = len(bin_vec) pm = precision_at_k(bin_vec, M) rm = recall_at_k(bin_vec, M, M) ndcg = ndcg_at_k(scaled, M) records.append([pm, rm, ndcg]) return pd.DataFrame.from_records(records, index=query_index, columns=COLUMNS) metrics = evaluation_metric(bin_results, queries_index, scaled_results) metrics def overall_map(map_relevant_docs): """ Función que calcula el MAP de los resultados de las queries. :param map_relevant_docs: Vector binario de las queries asegurandose de que aparezcan todos los documentos relevantes :return: El Mean average precision de los resultados de las queries. """ matrix = [vector for key, vector in map_relevant_docs.items() ] return mean_avg_precision(matrix) print(f'MAP resultante de todas las queries: {overall_map(map_relevant_docs)}')
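# --- Optional vectorized alternative (a sketch, not part of the original notebook) ---
# Because every query vector built above is already a unit vector, the cosine
# similarity against all documents reduces to a single matrix product divided by
# the document norms. The toy matrices below are placeholders for matrix_queries
# and weighted_vector_df; only the shapes matter here.
import numpy as np

rng = np.random.default_rng(0)
n_queries, vocab_size, n_docs = 3, 50, 8

Q_demo = rng.random((n_queries, vocab_size))                 # |Q| x |V| query matrix
Q_demo /= np.linalg.norm(Q_demo, axis=1, keepdims=True)      # unit-norm rows, like vectorize_queries
D_demo = rng.random((vocab_size, n_docs))                    # |V| x |D| tf-idf matrix

doc_norms = np.linalg.norm(D_demo, axis=0)                   # ||d|| for each document column
similarity_demo = (Q_demo @ D_demo) / doc_norms              # |Q| x |D| cosine scores in one shot
print(similarity_demo.shape)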
0.450843
0.37399
First, some housekeeping... ``` import pandas as pd from rdkit import Chem from rdkit.Chem import Draw from rdkit.Chem import Descriptors from rdkit.Chem import rdChemReactions as Reactions # Define directories SMRY_DIR = 'Rxn_Summs' TYPE_DIR = 'Rxn_Type' INV_DIR = 'Inventory' PROP_DIR = 'Properties' ``` This Jupyter notebook is meant to serve as an example of how RouteScore is calculated in `RS_Base.py`. All other RouteScore calculations are performed the same way. First, import the various classes from `routescore.py` ``` from routescore import General, Reaction_Templates, Calculate gen = General() rxn = Reaction_Templates() calc = Calculate() ``` Next, load a molecule from the targets list. Here, we will choose the molecule with the lowest RouteScore We can skip lines 10-16 in `RS_Base.py` because they aren't required for calculating the RouteScore. ``` df = pd.read_pickle('./Targets/targets_Base.pkl') target = df.loc[df.RouteScore.idxmin()] gen.draw_mols([target.pentamer]) ``` Step 1 ``` scale = 0.0001 rxn_smi = f'{target.a}.{target.b}>>{target.ab}' rxn_obj = Reactions.ReactionFromSmarts(rxn_smi, useSmiles=True) Draw.ReactionToImage(rxn_obj) ``` This next cell is the equivalent of `Reaction_Templates.wingSuzuki()`. ``` # List of reactant molecules and eq per reaction site sm_list = [{'smiles': target.a, 'eq': 3}, {'smiles': target.b, 'eq': 1} ] # Number of reaction sites on the B molecule (1) rxn_sites = rxn.stoichiometry(target.b, 'Suzuki') print(f'Number of reaction sites on the molecule: {rxn_sites}') # Total equivalents of A molecule sm_list[0]['eq'] = rxn_sites * sm_list[0]['eq'] print(f"Updated equivalents of {target.a}: {sm_list[0]['eq']}") # Unlike subsequent reactions, scale isn't normalized because for the limiting reagent # We treat commercially available materials as being available in essentially unlimited quantities wS_scale: float = scale print(f'Scale of reaction: {wS_scale} mols') wS_yield: float = 1 print('Assumed reaction yield: 100%') ``` These next cells are the equivalent of `Calculate.StepScore()`. ``` multiplier = rxn_sites product_smiles = target.ab # Get information on starting materials from inventory block_dicts = [calc.get_block_info(sm['smiles']) for sm in sm_list] block_dicts # Get information on reagents for the reaction reaction = gen.load_pkl(TYPE_DIR, 'Suzuki') reaction ``` Calculate costs of synthesis ``` # Get reaction summary information rxn_smry = gen.load_pkl(SMRY_DIR, 'Suzuki_summary') print('The following numbers assumed that the robot is being used to its full capacity (48 parallel reactions).') n_parr: int = rxn_smry['n_parr'] print(f'Number of reactions performed in parallel: {n_parr}') t_H: float = rxn_smry['t_H'] / n_parr print(f't_H: {t_H} hrs') t_M: float = rxn_smry['t_M'] / n_parr print(f't_M: {t_M} hrs') # Time cost cost_time: float = calc.TTC(t_H, t_M) print(f'Time cost of reaction, TTC = {cost_time} hrs') # List of equivalents for each reactant sm_eqs = [sm['eq'] for sm in sm_list] block_costs = [sm['$/mol'] for sm in block_dicts] # Monetary cost cost_money, naive_chem_cost = calc.money(reaction, block_costs, sm_eqs, t_H, t_M, scale, multiplier) print(f'Monetary cost of reaction: ${cost_money}') block_MWs = [sm['g/mol'] for sm in block_dicts] # Materials cost cost_materials: float = calc.mass(reaction, block_MWs, sm_eqs, scale, multiplier) print(f'Mass cost of reaction: {cost_materials}g') step_score_1 = cost_time * cost_money * cost_materials print(f'StepScore for reaction 1 = {step_score_1} h*$*g') ``` Updating inventory. 
``` MW: float = Descriptors.MolWt(Chem.MolFromSmiles(product_smiles)) # Add product information to inventory product_dict: dict = { 'Block_type': '-', 'Block_num': 0, 'SMILES': product_smiles, 'Name': '-', 'g/mol': MW, 'Quantity': 0, 'CAD': 0, '$/mol': 0, 'Manual?': '' } calc.update_inventory(product_smiles, product_dict) calc.inv.to_csv('./Inventory/Inventory.csv', index=False) ``` Step 2 ``` scale = wS_scale * wS_yield rxn_smi = f'{target.ab}.{target.c}>>{target.pentamer}' rxn_obj = Reactions.ReactionFromSmarts(rxn_smi, useSmiles=True) Draw.ReactionToImage(rxn_obj) ``` This next cell is the equivalent of `Reaction_Templates.pentamerSuzuki()`. ``` # List of reactant molecules and eq per reaction site sm_list = [{'smiles': target.ab, 'eq': 3}, {'smiles': target.c, 'eq': 1} ] # Number of reaction sites on the B molecule (1) rxn_sites = rxn.stoichiometry(target.c, 'Suzuki') print(f'Number of reaction sites on the molecule: {rxn_sites}') # Total equivalents of A molecule sm_list[0]['eq'] = rxn_sites * sm_list[0]['eq'] print(f"Updated equivalents of {target.ab}: {sm_list[0]['eq']}") # Normalize scale for quantity of limiting reagent (AB) pS_scale: float = scale / sm_list[0]['eq'] print(f'Scale of reaction: {pS_scale} mols') pS_yield: float = 1 print('Assumed reaction yield: 100%') ``` These next cells are the equivalent of `Calculate.StepScore()`. ``` multiplier = rxn_sites product_smiles = target.pentamer # Get information on starting materials from inventory block_dicts = [calc.get_block_info(sm['smiles']) for sm in sm_list] block_dicts # Get information on reagents for the reaction reaction = gen.load_pkl(TYPE_DIR, 'Suzuki') reaction ``` Calculate costs of synthesis ``` # Get reaction summary information rxn_smry = gen.load_pkl(SMRY_DIR, 'Suzuki_summary') print('The following numbers assumed that the robot is being used to its full capacity (48 parallel reactions).') n_parr: int = rxn_smry['n_parr'] print(f'Number of reactions performed in parallel: {n_parr}') t_H: float = rxn_smry['t_H'] / n_parr print(f't_H: {t_H} hrs') t_M: float = rxn_smry['t_M'] / n_parr print(f't_M: {t_M} hrs') # Time cost cost_time: float = calc.TTC(t_H, t_M) print(f'Time cost of reaction, TTC = {cost_time} hrs') # List of equivalents for each reactant sm_eqs = [sm['eq'] for sm in sm_list] block_costs = [sm['$/mol'] for sm in block_dicts] # Monetary cost cost_money, naive_chem_cost = calc.money(reaction, block_costs, sm_eqs, t_H, t_M, pS_scale, multiplier) print(f'Monetary cost of reaction: ${cost_money}') block_MWs = [sm['g/mol'] for sm in block_dicts] # Materials cost cost_materials: float = calc.mass(reaction, block_MWs, sm_eqs, pS_scale, multiplier) print(f'Mass cost of reaction: {cost_materials}g') step_score_2 = cost_time * cost_money * cost_materials print(f'StepScore for reaction 2 = {step_score_2} h*$*g') ``` Updating inventory. ``` MW: float = Descriptors.MolWt(Chem.MolFromSmiles(product_smiles)) # Add product information to inventory product_dict: dict = { 'Block_type': '-', 'Block_num': 0, 'SMILES': product_smiles, 'Name': '-', 'g/mol': MW, 'Quantity': 0, 'CAD': 0, '$/mol': 0, 'Manual?': '' } calc.update_inventory(product_smiles, product_dict) calc.inv.to_csv('./Inventory/Inventory.csv', index=False) final_scale = pS_scale * pS_yield final_scale ``` Calculate RouteScore. ``` route_score = (step_score_1 + step_score_2) / final_scale print(f'RouteScore = {route_score} h*$*g/(mol target)') ``` Check that RouteScore calculated in the Jupyter notebook matches the stored RouteScore. 
``` print(f'RouteScore calculated here: {route_score}') here = round(route_score, 10) print(f'Stored RouteScore: {target.RouteScore}') stored = round(target.RouteScore, 10) print(here == stored) if here == stored: print('Success!') ```
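As a side note, the comparison above rounds both values to 10 decimal places before testing equality; a more robust pattern for comparing floating-point scores is `math.isclose`. The minimal sketch below reproduces the RouteScore arithmetic with standalone helpers (`step_score` and `route_score` are illustrative names, not part of the `routescore` package, and the numbers are made up).

```
import math

def step_score(cost_time, cost_money, cost_materials):
    """StepScore as above: time (h) * money ($) * materials (g)."""
    return cost_time * cost_money * cost_materials

def route_score(step_scores, final_scale):
    """RouteScore: sum of StepScores divided by the final scale (mol of target)."""
    return sum(step_scores) / final_scale

# Illustrative numbers only
s1 = step_score(1.5, 12.0, 0.8)
s2 = step_score(1.5, 20.0, 1.1)
rs = route_score([s1, s2], final_scale=3.3e-5)
print(f'RouteScore = {rs:.4g} h*$*g/(mol target)')

stored_rs = rs * (1 + 1e-12)   # stand-in for the value stored in the targets table
print(math.isclose(rs, stored_rs, rel_tol=1e-9))
```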
github_jupyter
import pandas as pd from rdkit import Chem from rdkit.Chem import Draw from rdkit.Chem import Descriptors from rdkit.Chem import rdChemReactions as Reactions # Define directories SMRY_DIR = 'Rxn_Summs' TYPE_DIR = 'Rxn_Type' INV_DIR = 'Inventory' PROP_DIR = 'Properties' from routescore import General, Reaction_Templates, Calculate gen = General() rxn = Reaction_Templates() calc = Calculate() df = pd.read_pickle('./Targets/targets_Base.pkl') target = df.loc[df.RouteScore.idxmin()] gen.draw_mols([target.pentamer]) scale = 0.0001 rxn_smi = f'{target.a}.{target.b}>>{target.ab}' rxn_obj = Reactions.ReactionFromSmarts(rxn_smi, useSmiles=True) Draw.ReactionToImage(rxn_obj) # List of reactant molecules and eq per reaction site sm_list = [{'smiles': target.a, 'eq': 3}, {'smiles': target.b, 'eq': 1} ] # Number of reaction sites on the B molecule (1) rxn_sites = rxn.stoichiometry(target.b, 'Suzuki') print(f'Number of reaction sites on the molecule: {rxn_sites}') # Total equivalents of A molecule sm_list[0]['eq'] = rxn_sites * sm_list[0]['eq'] print(f"Updated equivalents of {target.a}: {sm_list[0]['eq']}") # Unlike subsequent reactions, scale isn't normalized because for the limiting reagent # We treat commercially available materials as being available in essentially unlimited quantities wS_scale: float = scale print(f'Scale of reaction: {wS_scale} mols') wS_yield: float = 1 print('Assumed reaction yield: 100%') multiplier = rxn_sites product_smiles = target.ab # Get information on starting materials from inventory block_dicts = [calc.get_block_info(sm['smiles']) for sm in sm_list] block_dicts # Get information on reagents for the reaction reaction = gen.load_pkl(TYPE_DIR, 'Suzuki') reaction # Get reaction summary information rxn_smry = gen.load_pkl(SMRY_DIR, 'Suzuki_summary') print('The following numbers assumed that the robot is being used to its full capacity (48 parallel reactions).') n_parr: int = rxn_smry['n_parr'] print(f'Number of reactions performed in parallel: {n_parr}') t_H: float = rxn_smry['t_H'] / n_parr print(f't_H: {t_H} hrs') t_M: float = rxn_smry['t_M'] / n_parr print(f't_M: {t_M} hrs') # Time cost cost_time: float = calc.TTC(t_H, t_M) print(f'Time cost of reaction, TTC = {cost_time} hrs') # List of equivalents for each reactant sm_eqs = [sm['eq'] for sm in sm_list] block_costs = [sm['$/mol'] for sm in block_dicts] # Monetary cost cost_money, naive_chem_cost = calc.money(reaction, block_costs, sm_eqs, t_H, t_M, scale, multiplier) print(f'Monetary cost of reaction: ${cost_money}') block_MWs = [sm['g/mol'] for sm in block_dicts] # Materials cost cost_materials: float = calc.mass(reaction, block_MWs, sm_eqs, scale, multiplier) print(f'Mass cost of reaction: {cost_materials}g') step_score_1 = cost_time * cost_money * cost_materials print(f'StepScore for reaction 1 = {step_score_1} h*$*g') MW: float = Descriptors.MolWt(Chem.MolFromSmiles(product_smiles)) # Add product information to inventory product_dict: dict = { 'Block_type': '-', 'Block_num': 0, 'SMILES': product_smiles, 'Name': '-', 'g/mol': MW, 'Quantity': 0, 'CAD': 0, '$/mol': 0, 'Manual?': '' } calc.update_inventory(product_smiles, product_dict) calc.inv.to_csv('./Inventory/Inventory.csv', index=False) scale = wS_scale * wS_yield rxn_smi = f'{target.ab}.{target.c}>>{target.pentamer}' rxn_obj = Reactions.ReactionFromSmarts(rxn_smi, useSmiles=True) Draw.ReactionToImage(rxn_obj) # List of reactant molecules and eq per reaction site sm_list = [{'smiles': target.ab, 'eq': 3}, {'smiles': target.c, 'eq': 1} ] # Number of reaction sites 
on the B molecule (1) rxn_sites = rxn.stoichiometry(target.c, 'Suzuki') print(f'Number of reaction sites on the molecule: {rxn_sites}') # Total equivalents of A molecule sm_list[0]['eq'] = rxn_sites * sm_list[0]['eq'] print(f"Updated equivalents of {target.ab}: {sm_list[0]['eq']}") # Normalize scale for quantity of limiting reagent (AB) pS_scale: float = scale / sm_list[0]['eq'] print(f'Scale of reaction: {pS_scale} mols') pS_yield: float = 1 print('Assumed reaction yield: 100%') multiplier = rxn_sites product_smiles = target.pentamer # Get information on starting materials from inventory block_dicts = [calc.get_block_info(sm['smiles']) for sm in sm_list] block_dicts # Get information on reagents for the reaction reaction = gen.load_pkl(TYPE_DIR, 'Suzuki') reaction # Get reaction summary information rxn_smry = gen.load_pkl(SMRY_DIR, 'Suzuki_summary') print('The following numbers assumed that the robot is being used to its full capacity (48 parallel reactions).') n_parr: int = rxn_smry['n_parr'] print(f'Number of reactions performed in parallel: {n_parr}') t_H: float = rxn_smry['t_H'] / n_parr print(f't_H: {t_H} hrs') t_M: float = rxn_smry['t_M'] / n_parr print(f't_M: {t_M} hrs') # Time cost cost_time: float = calc.TTC(t_H, t_M) print(f'Time cost of reaction, TTC = {cost_time} hrs') # List of equivalents for each reactant sm_eqs = [sm['eq'] for sm in sm_list] block_costs = [sm['$/mol'] for sm in block_dicts] # Monetary cost cost_money, naive_chem_cost = calc.money(reaction, block_costs, sm_eqs, t_H, t_M, pS_scale, multiplier) print(f'Monetary cost of reaction: ${cost_money}') block_MWs = [sm['g/mol'] for sm in block_dicts] # Materials cost cost_materials: float = calc.mass(reaction, block_MWs, sm_eqs, pS_scale, multiplier) print(f'Mass cost of reaction: {cost_materials}g') step_score_2 = cost_time * cost_money * cost_materials print(f'StepScore for reaction 2 = {step_score_2} h*$*g') MW: float = Descriptors.MolWt(Chem.MolFromSmiles(product_smiles)) # Add product information to inventory product_dict: dict = { 'Block_type': '-', 'Block_num': 0, 'SMILES': product_smiles, 'Name': '-', 'g/mol': MW, 'Quantity': 0, 'CAD': 0, '$/mol': 0, 'Manual?': '' } calc.update_inventory(product_smiles, product_dict) calc.inv.to_csv('./Inventory/Inventory.csv', index=False) final_scale = pS_scale * pS_yield final_scale route_score = (step_score_1 + step_score_2) / final_scale print(f'RouteScore = {route_score} h*$*g/(mol target)') print(f'RouteScore calculated here: {route_score}') here = round(route_score, 10) print(f'Stored RouteScore: {target.RouteScore}') stored = round(target.RouteScore, 10) print(here == stored) if here == stored: print('Success!')
0.555918
0.86113
<a href="https://colab.research.google.com/github/bereml/iap/blob/master/libretas/1b_pytorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Introducción a PyTorch Curso: [Introducción al Aprendizaje Profundo](http://turing.iimas.unam.mx/~ricardoml/course/iap/). Profesores: [Bere](https://turing.iimas.unam.mx/~bereml/) y [Ricardo](https://turing.iimas.unam.mx/~ricardoml/) Montalvo Lezama. --- --- [PyTorch](https://pytorch.org/) es una biblioteca de software de código abierto para la implementación sencilla y eficiente de redes neuronales profundas. El desarrollador original de la biblioteca es [Soumith Chintala](https://www.youtube.com/watch?v=vkzr1xu-8Nk). <center><img src="https://pytorch.org/assets/images/pytorch-logo.png" width="300" align="center"/></center> <div style="text-align: center">https://pytorch.org</div> ``` import torch ``` --- ## 1 Tensores Un tensor de PyTorch es una arreglo multidimensional, la idea es similar a una arreglo de numpy pero con la diferencia de que se alojan en GPU y pueden rastrean las operaciones que los generaron. Se representan con la clase `torch.Tensor` y y pueden ser booleanos, enteros o flotantes. <center><img src="https://miro.medium.com/max/1000/1*8jdzMrA33Leu3j3F6A8a3w.png" width="600" align="center"/></center> <div style="text-align: center">https://medium.com/@anoorasfatima/10-most-common-maths-operation-with-pytorchs-tensor-70a491d8cafd</div> ### 1.1 A partir de datos ``` # tensor 0 dimensional = escalar s = torch.tensor(True) print(s.shape, s.dtype) print(s) # tensor 1 dimensional = vector v = torch.tensor([1, 2]) print(v.shape, v.dtype) print(v) # tensor 2 dimensional = matriz m = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) print(m.shape, m.dtype) print(m) ``` ### 1.2 Como secuencias ``` # similar a range de python torch.arange(8, dtype=torch.float64) # vector de 0s torch.zeros(8) # matriz de 1s torch.ones([2, 4]) ``` ### 1.3 A partir de otros tensores ``` # tensor de 1s con la misma forma que v torch.zeros_like(v) # tensor de 0s con la misma forma que v torch.ones_like(m) ``` ### 1.4 Muestreando distribuciones ``` # matriz con distribución uniforme en [0,1) torch.rand(5) # vector con distibución normal unitaria torch.normal(0, 1, size=(2, 3)) ``` #### 1.5 De numpy y de vuelta ``` import numpy as np a = np.random.randn(2, 2) t = torch.from_numpy(a) n = t.numpy() type(t), type(n) ``` --- ## 2 Formas y vistas ``` x = torch.arange(12) print(x.shape) print(x) v1 = x.view(2, 6) print(v1.shape) print(v1) v2 = x.view(3, 4) print(v2.shape) print(v2) v3 = x.view(4, -1) print(v3.shape) print(v3) # agregar dimensión v4 = x.unsqueeze(0) print(v4.shape) print(v4) # eliminar dimensión v5 = v4.squeeze() print(v5.shape) print(v5) ``` --- ## 3 Lectura y escritura ``` x = torch.arange(20).reshape(4, 5) x # acceder a un elemento x[0, 0], x[-1, -1] # acceder una fila x[0], x[-1] # acceder una columna x[:, 0], x[:, -1] # rebanada de columnas x[1:3] # rebanada de filas x[:, 1:-1] # rebanada de filas y columnas x[1:3, 1:-1] ``` --- ## 4 Funciones ### 4.1 Operaciones aritméticas ``` x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) print("Suma:", x + y) print("Resta:", x - y) print("División:", x / y) print("Multiplicación:", x * y) print("Potencia:", x ** y) ``` ### 4.2 Álgebra lineal ``` u = torch.arange(4, dtype=torch.float32) v = torch.ones(4, dtype=torch.float32) w = torch.arange(12, dtype=torch.float32).view([3, 4]) print(u) print(v) print(w) # producto escalar 10 * u # producto 
punto torch.dot(u, v) # producto de matrices # w @ u torch.matmul(w, u) # suma de Einstein torch.einsum('ij,j->i', w, u) # transpuesta w.T ``` ### 4.3 Concatenación y apilado ``` u = torch.arange(12).reshape(3, 4) v = torch.arange(12, 24).reshape(3, 4) # concatenación en filas torch.cat((u, v), dim=0) # concatenación en columnas torch.cat((u, v), dim=1) torch.stack([u, v], dim=0) torch.stack([u, v], dim=1) ``` ### 4.4 Reducción ``` x = torch.arange(12, dtype=torch.float32).reshape(3, 4) x x.sum() # reducir filas x.sum(dim=0) # reducir columnas x.sum(dim=1) x.mean(), x.mean(dim=0), x.mean(dim=1) # máximo del tensor x.max() # máximo de filas x.max(dim=0) # máximo de columnas x.max(dim=1) ``` ### 4.5 Difusión La difusión es un mecanismo que se utiliza para realizar operaciones entre tensores cuando poseen formas diferentes. ``` a = torch.arange(3).view(3, 1) a b = torch.arange(2).view(1, 2) b a + b ``` --- ## 5 Diferenciación automática La [diferenciación automática](https://es.wikipedia.org/wiki/Diferenciaci%C3%B3n_autom%C3%A1tica) es un método para la evaluación de derivadas de una función expresada como un programa de computación usualamente conocido como gráfica de cómputo. <center><img src="https://raw.githubusercontent.com/bereml/iap/master/fig/autodiff.png" width="800" align="center"/></center> <div style="text-align: center">Fuente: Automatic Differentiation in Machine Learning: a Survey, Baydin et. al, 2018.</div> &nbsp; PyTorch permite realizar autodiferenciación manteniendo un arbol de expresiones (gráfica de cómputo) que se ensambla de forma automática conforme se definen las expresiones en el programa. Por ejemplo, consideremos la funcion $f(x, y) = 2 x^{3} + 3 y^{2} + c$ respecto a dos variables independientes $x$ y $y$. Al codificar esta función con tensores, podemos pensar de forma simplificada que Pytorch ensambla (al vuelo y de forma implícita) la siguiente gráfica de cómputo. <center><img src="https://raw.githubusercontent.com/bereml/iap/master/fig/autodiff_example.svg" width="800" align="center"/></center> &nbsp; En esta gráfica de cómputo $x$ y $y$ son tensores hoja, mientras que $f$ es un tensor interno (no hoja). Para crear la gráfica de cómputo los tensores están equipados con los siguientes atributos: * `.grad`: escalar flotante que almacena la evaluación de la derivada, * `.requires_grad` bandera booleana que indica si tensor * `.grad_fn` es la derivada $f'$ respecto a los tensores padre en la gráfica de computo. Por defecto, PyTorch únicamente computa derivadas para tensores hoja que fueron creados con la bandera `requires_grad=True`. Los tensores interno son lo únicos que contienen un `.grad_fn` valido. Para observar como funciona esto, definamos $f(\cdot)$ y derivemos de forma automática con respecto a $x$ y $y$ para obtener $f'_x(2, 3) = 6(2)^2 = 24$, $f'_y(2, 3) = 6(3) = 18$. Primero, definamos tensores con las variables $x$ y $y$ e inspeccionemos sus atributos usados para la autodiferenciación. ``` # creamos el tensor con rastreo de gradiente activado x = torch.tensor(2.0, requires_grad=True) x, x.grad, x.requires_grad, x.grad_fn # creamos el tensor con rastreo de gradiente activado y = torch.tensor(3.0, requires_grad=True) y, y.grad, x.requires_grad # creamos el tensor sin rastreo de gradiente c = torch.tensor(1.0) c, c.grad, c.requires_grad ``` Ahora definamos la función $f(\cdot)$ e inspeccionemos sus atributos. 
``` f = 2 * (x ** 3) + 3 * (y ** 2) + c f, f.grad, f.requires_grad, f.grad_fn ``` PyTorch nos indica con una advertencia ya que $f$ es un tensor interno y su atributo `.grad` no será usado durante la diferenciación automática. Ahora, computemos las derivadas $f'_x(2, 3)$ y $f'_y(2, 3)$ con el método `backward()`. ``` f.backward() ``` Finalmente, inspeccionemos el resultado. ``` x.grad, y.grad, c.grad, f.grad ``` ### Participación A Define una función con tres variables independientes, calcula sus derivadas de forma manual y verifica con PyTorch. ## Referencias [Tutorial de autodiferenciación en PyTorch](https://pytorch.org/docs/stable/autograd.html).
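A possible solution sketch for "Participación A" (the choice of function is ours, not prescribed by the course): take $f(x, y, z) = x^2 y + 3z^3$, whose manual derivatives are $f'_x = 2xy$, $f'_y = x^2$ and $f'_z = 9z^2$, and check them with `backward()`.

```
import torch

# f(x, y, z) = x^2 * y + 3 * z^3
# derivadas manuales: df/dx = 2xy, df/dy = x^2, df/dz = 9z^2
x = torch.tensor(2.0, requires_grad=True)
y = torch.tensor(-1.0, requires_grad=True)
z = torch.tensor(3.0, requires_grad=True)

f = x ** 2 * y + 3 * z ** 3
f.backward()

print(x.grad, 2 * 2.0 * (-1.0))   # tensor(-4.) vs -4.0
print(y.grad, 2.0 ** 2)           # tensor(4.)  vs 4.0
print(z.grad, 9 * 3.0 ** 2)       # tensor(81.) vs 81.0
```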
github_jupyter
import torch # tensor 0 dimensional = escalar s = torch.tensor(True) print(s.shape, s.dtype) print(s) # tensor 1 dimensional = vector v = torch.tensor([1, 2]) print(v.shape, v.dtype) print(v) # tensor 2 dimensional = matriz m = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) print(m.shape, m.dtype) print(m) # similar a range de python torch.arange(8, dtype=torch.float64) # vector de 0s torch.zeros(8) # matriz de 1s torch.ones([2, 4]) # tensor de 1s con la misma forma que v torch.zeros_like(v) # tensor de 0s con la misma forma que v torch.ones_like(m) # matriz con distribución uniforme en [0,1) torch.rand(5) # vector con distibución normal unitaria torch.normal(0, 1, size=(2, 3)) import numpy as np a = np.random.randn(2, 2) t = torch.from_numpy(a) n = t.numpy() type(t), type(n) x = torch.arange(12) print(x.shape) print(x) v1 = x.view(2, 6) print(v1.shape) print(v1) v2 = x.view(3, 4) print(v2.shape) print(v2) v3 = x.view(4, -1) print(v3.shape) print(v3) # agregar dimensión v4 = x.unsqueeze(0) print(v4.shape) print(v4) # eliminar dimensión v5 = v4.squeeze() print(v5.shape) print(v5) x = torch.arange(20).reshape(4, 5) x # acceder a un elemento x[0, 0], x[-1, -1] # acceder una fila x[0], x[-1] # acceder una columna x[:, 0], x[:, -1] # rebanada de columnas x[1:3] # rebanada de filas x[:, 1:-1] # rebanada de filas y columnas x[1:3, 1:-1] x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) print("Suma:", x + y) print("Resta:", x - y) print("División:", x / y) print("Multiplicación:", x * y) print("Potencia:", x ** y) u = torch.arange(4, dtype=torch.float32) v = torch.ones(4, dtype=torch.float32) w = torch.arange(12, dtype=torch.float32).view([3, 4]) print(u) print(v) print(w) # producto escalar 10 * u # producto punto torch.dot(u, v) # producto de matrices # w @ u torch.matmul(w, u) # suma de Einstein torch.einsum('ij,j->i', w, u) # transpuesta w.T u = torch.arange(12).reshape(3, 4) v = torch.arange(12, 24).reshape(3, 4) # concatenación en filas torch.cat((u, v), dim=0) # concatenación en columnas torch.cat((u, v), dim=1) torch.stack([u, v], dim=0) torch.stack([u, v], dim=1) x = torch.arange(12, dtype=torch.float32).reshape(3, 4) x x.sum() # reducir filas x.sum(dim=0) # reducir columnas x.sum(dim=1) x.mean(), x.mean(dim=0), x.mean(dim=1) # máximo del tensor x.max() # máximo de filas x.max(dim=0) # máximo de columnas x.max(dim=1) a = torch.arange(3).view(3, 1) a b = torch.arange(2).view(1, 2) b a + b # creamos el tensor con rastreo de gradiente activado x = torch.tensor(2.0, requires_grad=True) x, x.grad, x.requires_grad, x.grad_fn # creamos el tensor con rastreo de gradiente activado y = torch.tensor(3.0, requires_grad=True) y, y.grad, x.requires_grad # creamos el tensor sin rastreo de gradiente c = torch.tensor(1.0) c, c.grad, c.requires_grad f = 2 * (x ** 3) + 3 * (y ** 2) + c f, f.grad, f.requires_grad, f.grad_fn f.backward() x.grad, y.grad, c.grad, f.grad
0.530236
0.983832
# Wikipediaにあるページとその出身地を取得 ``` from time import sleep from collections import defaultdict import json from itertools import chain from SPARQLWrapper import SPARQLWrapper from tqdm.notebook import tqdm import pandas as pd from utils import PREFECTURES sparql = SPARQLWrapper(endpoint="http://ja.dbpedia.org/sparql", returnFormat="json") ``` ## 「<都道府県>出身の人物一覧」からのリンク先ページを列挙 e.g., [北海道出身の人物一覧 - Wikipedia](https://ja.wikipedia.org/wiki/%E5%8C%97%E6%B5%B7%E9%81%93%E5%87%BA%E8%BA%AB%E3%81%AE%E4%BA%BA%E7%89%A9%E4%B8%80%E8%A6%A7) - 参考 - [Template‐ノート:日本出身の人物の合意事項 - Wikipedia](https://ja.wikipedia.org/wiki/Template%E2%80%90%E3%83%8E%E3%83%BC%E3%83%88:%E6%97%A5%E6%9C%AC%E5%87%BA%E8%BA%AB%E3%81%AE%E4%BA%BA%E7%89%A9%E3%81%AE%E5%90%88%E6%84%8F%E4%BA%8B%E9%A0%85) - [出身別の人名記事一覧の一覧 - Wikipedia](https://ja.wikipedia.org/wiki/%E5%87%BA%E8%BA%AB%E5%88%A5%E3%81%AE%E4%BA%BA%E5%90%8D%E8%A8%98%E4%BA%8B%E4%B8%80%E8%A6%A7%E3%81%AE%E4%B8%80%E8%A6%A7#%E6%97%A5%E6%9C%AC%E3%81%AE%E5%9C%B0%E5%9F%9F%EF%BC%88%E9%83%BD%E9%81%93%E5%BA%9C%E7%9C%8C%E3%80%81%E5%B8%82%E7%94%BA%E6%9D%91%EF%BC%89) ``` pref_pages = defaultdict(set) for pref_name in tqdm(PREFECTURES): sparql.setQuery(f""" SELECT DISTINCT * WHERE {{ <http://ja.dbpedia.org/resource/{pref_name}出身の人物一覧> dbo:wikiPageWikiLink ?o . }} """) res = sparql.query().convert() for x in res["results"]["bindings"]: assert x["o"]["type"] == "uri" pref_pages[pref_name].add( x["o"]["value"] ) df_count = pd.DataFrame([(k, len(v)) for k, v in pref_pages.items()], columns=["pref", "n_pages"]) df_count all_page_list = sorted(set(chain.from_iterable(pref_pages.values()))) len(all_page_list) with open("../data/workspace/all_page_list.json", "w") as fp: json.dump(all_page_list, fp, indent=2, ensure_ascii=False) !head ../data/workspace/all_page_list.json ``` ## 出身地を取得 ``` def get_locations(page): sparql.setQuery(f""" SELECT DISTINCT * WHERE {{ <{page}> prop-ja:出身地 | prop-ja:出生地 | prop-ja:生誕地 | dbo:birthPlace | prop-ja:origin ?o . 
}} """) res = sparql.query().convert() items = res["results"]["bindings"] locations = set([x["o"]["value"] for x in items]) return locations get_locations("http://ja.dbpedia.org/resource/米津玄師") get_locations("http://ja.dbpedia.org/resource/尾崎将司") get_locations("http://ja.dbpedia.org/resource/大谷翔平") ``` ### 出身地を取得できない例 あくまで構造化された情報として付与されたものを対象としている。 そのため、例えば自由記述で `○○はA県B市C区出身の人物である` などと記載されている**のみ**の場合は対象外となる [About: 池田幸太郎 (首長)](https://ja.dbpedia.org/page/%E6%B1%A0%E7%94%B0%E5%B9%B8%E5%A4%AA%E9%83%8E_(%E9%A6%96%E9%95%B7)) > 池田幸太郎(いけだ こうたろう、1904年 - 1989年)は、日本の政治家・官僚・薬剤師。北海道旭川市出身。 しかし、プロパティとしてその情報(「北海道旭川市出身」)は付与されていないため、今回の手法では出身地なしとなる。 ``` get_locations("http://ja.dbpedia.org/page/池田幸太郎_(首長)") ``` ### 全件取得 ``` with open("../data/workspace/all_page_list.json") as fp: all_page_list = json.load(fp) len(all_page_list) with open("../data/workspace/page_locations_raw.json") as fp: page_locations_raw = json.load(fp) for page in tqdm(all_page_list): if page in tqdem(page_locations_raw): continue sleep(1) locations = get_locations(page) if not locations: continue page_locations_raw[page] = sorted(locations) with open("../data/workspace/page_locations_raw.json", "w") as fp: json.dump(page_locations_raw, fp, indent=2, ensure_ascii=False) len(page_locations_raw) !head ../data/workspace/page_locations_raw.json !tail ../data/workspace/page_locations_raw.json ``` ### その他 ``` # 出身地を紐付けられなかったページ no_location_pages = [p for p in all_page_list if p.replace("http://ja.dbpedia.org/resource/", "") not in page_locations_raw] with open("../data/workspace/no_location_pages.json", "w") as fp: json.dump(no_location_pages, fp, indent=2, ensure_ascii=False) len(no_location_pages) ```
github_jupyter
from time import sleep from collections import defaultdict import json from itertools import chain from SPARQLWrapper import SPARQLWrapper from tqdm.notebook import tqdm import pandas as pd from utils import PREFECTURES sparql = SPARQLWrapper(endpoint="http://ja.dbpedia.org/sparql", returnFormat="json") pref_pages = defaultdict(set) for pref_name in tqdm(PREFECTURES): sparql.setQuery(f""" SELECT DISTINCT * WHERE {{ <http://ja.dbpedia.org/resource/{pref_name}出身の人物一覧> dbo:wikiPageWikiLink ?o . }} """) res = sparql.query().convert() for x in res["results"]["bindings"]: assert x["o"]["type"] == "uri" pref_pages[pref_name].add( x["o"]["value"] ) df_count = pd.DataFrame([(k, len(v)) for k, v in pref_pages.items()], columns=["pref", "n_pages"]) df_count all_page_list = sorted(set(chain.from_iterable(pref_pages.values()))) len(all_page_list) with open("../data/workspace/all_page_list.json", "w") as fp: json.dump(all_page_list, fp, indent=2, ensure_ascii=False) !head ../data/workspace/all_page_list.json def get_locations(page): sparql.setQuery(f""" SELECT DISTINCT * WHERE {{ <{page}> prop-ja:出身地 | prop-ja:出生地 | prop-ja:生誕地 | dbo:birthPlace | prop-ja:origin ?o . }} """) res = sparql.query().convert() items = res["results"]["bindings"] locations = set([x["o"]["value"] for x in items]) return locations get_locations("http://ja.dbpedia.org/resource/米津玄師") get_locations("http://ja.dbpedia.org/resource/尾崎将司") get_locations("http://ja.dbpedia.org/resource/大谷翔平") get_locations("http://ja.dbpedia.org/page/池田幸太郎_(首長)") with open("../data/workspace/all_page_list.json") as fp: all_page_list = json.load(fp) len(all_page_list) with open("../data/workspace/page_locations_raw.json") as fp: page_locations_raw = json.load(fp) for page in tqdm(all_page_list): if page in page_locations_raw: continue sleep(1) locations = get_locations(page) if not locations: continue page_locations_raw[page] = sorted(locations) with open("../data/workspace/page_locations_raw.json", "w") as fp: json.dump(page_locations_raw, fp, indent=2, ensure_ascii=False) len(page_locations_raw) !head ../data/workspace/page_locations_raw.json !tail ../data/workspace/page_locations_raw.json # 出身地を紐付けられなかったページ no_location_pages = [p for p in all_page_list if p.replace("http://ja.dbpedia.org/resource/", "") not in page_locations_raw] with open("../data/workspace/no_location_pages.json", "w") as fp: json.dump(no_location_pages, fp, indent=2, ensure_ascii=False) len(no_location_pages)
0.279435
0.536495
# Module 1: Introduction to Exploratory Data Analysis <a href="https://drive.google.com/file/d/1r4SBY6Dm6xjFqLH12tFb-Bf7wbvoIN_C/view" target="_blank"> <img src="http://www.deltanalytics.org/uploads/2/6/1/4/26140521/screen-shot-2019-01-05-at-4-48-15-pm_orig.png" width="500" height="400"> </a> [(Page 40)](https://drive.google.com/file/d/1r4SBY6Dm6xjFqLH12tFb-Bf7wbvoIN_C/view) In this notebook we dive into some plotting methods commonly used for Exploratory Data Analysis (EDA). Our goals for EDA are to open-mindedly explore the data, and see what insights we may find. The purpose of the EDA approach is to: - maximize insight into a data set - uncover underlying structure - extract important variables - detect outliers and anomalies - test underlying assumptions - develop parsimonious models - determine optimal factor settings In this notebook we'll investigate these plotting techniques: 1. Scatter Plot 1. Scatter Matrix 1. Histogram 1. Bar Plot 1. Box Plot 1. Time Series ### Setup <a id='setup'></a> ``` from datetime import datetime import dateutil.parser import re import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # The command below means that the output of multiple commands in a cell will be output at once from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # The command below tells jupyter to display up to 80 columns, this keeps everything visible pd.set_option('display.max_columns', 80) pd.set_option('expand_frame_repr', True) sns.set_palette("hls") %matplotlib inline ``` With each notebook, we need to read in our dataset ``` path = '../data/' filename = 'loans.csv' df = pd.read_csv(path+filename) df.head() ``` Before diving into our exploratory data analysis, it is worth reiterating that this whole process is about understanding the distribution of data and relationships between different features. When we move on to use machine learning algorithms, we will be asking a question and trying to answer it using the statistical relationships between different features in the data. The EDA analysis will help us shape this question and have a clear idea about how to approach building the algorithm! With that in mind, let's look at several visualization methods to examine the data and any relationships between features… ### 1. Scatter plot To start, the scatter plot! This is a very popular and powerful way to visualize the relationship between two continuous features. Essentially this plot shows us how feature Y changes when feature X is changed. If there is a clear pattern formed in the scatter plot, we say that x and y are **correlated**. There are several outcomes we see on a scatter plot: - Positive Linear = When X increases, y increases and the data points follow an approximate straight line - Negative Linear = When X increase, y decreases and the data points follow an approximate straight line - Non-Linear = When X increases, there is a consistent change in Y but this is not linear. It could be quadratic or exponential for example. - No correlation = When X increases, there is no clear pattern to how y changes, The data points form a random distribution. Let's try this out on our data and choose two continuous variables to plot. First lets extract all the continuous variables from our dataset. 
``` numeric_vars = df.select_dtypes(include=[np.number]).columns.tolist() for variable in numeric_vars: print(variable) ``` To start, let's look if there is a relationship between lender_count and loan_amount... intuition suggests that bigger loans much have more lenders. If this is true, we'll see this in the scatter plot! ``` ax = sns.regplot(x='lender_count', y='loan_amount', data=df) ``` Where does the data follow the line? Where does the data __not__ follow the line? What are possible reasons that data does __not__ follow the line? ----- Let's explore another relationship. ------ How about the repayment term and the loan amount? What kind of relationship would you expect between the repayment term and the loan amount? ``` ax = sns.regplot(x='repayment_term', y='loan_amount', data=df) ``` Where does the data follow the line? Where does the data __not__ follow the line? What are possible reasons that data does __not__ follow the line? ### 2. Scatter Matrix When we have lots of continuous variables, we could go through them one by one to see the relationship or we could use a scatterplot matrix! This creates a scatter plot between every combination of variables in a list. Another interesting quality of the scatter matrix is that the diagonals give a histogram of the variable in question. ``` num_df # Let's choose only a couple of columns to examine: columns = ['loan_amount', 'funded_amount', 'status'] num_df = df[columns] # Remove the NaN rows so Seaborn can plot num_df = num_df.dropna(axis=0, how='any') # Create the scatter plot and let's color the data point by their status. sns.pairplot(num_df, hue='status'); ``` What can say about the data? <br> <br> <br> ### 4. Histogram A histogram is useful for looking at the distribution of values for a single variable and also identifying outliers. It shows us the count of data. The plot below shows the data distribution of loan_amount using both bars and a continuous line. Without going into too much detail about the value on the y-axis, what we can take away from this is there is a much higher occurrence of small loans (high bar/peak in the line) and that large loans are much rarer (low bars/drop in the line). ``` sns.distplot(df['loan_amount'].dropna(axis = 0)); # Let's just look at those under 5K small_loans_df = df[(df['loan_amount'] < 5000)] sns.distplot(small_loans_df['loan_amount']); ``` Looking at the loans less than 5000 we see a much clearer distribution, although it is still left-hand skewed. ### 5. Bar Plot Bar plots are useful for understanding how categorical groups are different with respect to a continuous variable. ``` p = sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.mean); p.set(title='Average loan amount by sector') p.set_xticklabels(p.get_xticklabels(), rotation=-45); ``` Which sector is the largest? Why? ``` p = sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.sum); p.set(title='Total loan amount by sector') p.set_xticklabels(p.get_xticklabels(), rotation=-45); ``` Which sector is the largest? Why? <br> <br> ### 6. Box Plots A box plot describes the distribution of data based on five important summary numbers: the minimum, first quartile, median, third quartile, and maximum. In the simplest box plot the central rectangle spans the first quartile to the third quartile (the interquartile range or IQR). A segment inside the rectangle shows the median and "whiskers" above and below the box show the locations of the minimum and maximum. 
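Before applying it to the loans data, here is a minimal, self-contained sketch of the five numbers a box plot encodes, computed with NumPy percentiles on synthetic, right-skewed values (the array is randomly generated and only stands in for a column such as `loan_amount`).

```
import numpy as np

rng = np.random.default_rng(42)
amounts = rng.gamma(shape=2.0, scale=400.0, size=1000)   # synthetic, right-skewed values

q1, median, q3 = np.percentile(amounts, [25, 50, 75])
five_number_summary = {
    'min': amounts.min(),
    'Q1': q1,
    'median': median,
    'Q3': q3,
    'max': amounts.max(),
}
for name, value in five_number_summary.items():
    print(f'{name:>6}: {value:,.1f}')
print(f'   IQR: {q3 - q1:,.1f}')   # the span of the central rectangle
```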
Lets use this to look at the distribution of borrowers counts by each sector for different loan status for different partners. First lets look at how many loans come from different partners. ``` df_retail = df[df.sector=='Retail'] df_retail.head() sector = 'Retail' df_retail = df[df.sector==sector] p = sns.boxplot(x='sector', y='loan_amount', data=df_retail); p.set(title = f'Loan amounts for {sector}'); p.set_xticklabels(p.get_xticklabels(), rotation=-45); ``` Try this - Select other sectors and see how they look Aha! It looks like we are onto something here... we can see different trends for different partners! We'll look into this further in feature_engineering to see how we can use to create powerful features. ### 7. Time dependancy Quite often it's useful to see how a variable changes over time. This means creating a plot with time on the x-axis and the variable on the y-axis. Lets have a look at how the average loan amount changes over time on a monthly basis. ``` # Convert posted date to a datetime object time_column = 'funded_date' df[time_column] = pd.to_datetime(df[time_column]) # Resample the date to monthly intervals , taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('M', on=time_column)['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time'); ``` We can look at different timefrance by changing the parameter in resample. Lets look on a weekly basis! ``` # Resample the date to monthly intervals , taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('7D', on=time_column)['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time'); ``` What is next ------ Next we move on to feature engineering, where we create variables from what we've found! <br> <br> <br> ----
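As an aside on the resampling used above: the strings passed to `resample` (`'M'`, `'7D'`) are pandas offset aliases, so the same series can be aggregated at any frequency. A tiny self-contained sketch with made-up dates and amounts (not the loans data):

```
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy = pd.DataFrame({
    'funded_date': pd.date_range('2017-01-01', periods=120, freq='D'),
    'loan_amount': rng.integers(100, 2000, size=120),
})

# Same aggregation, two different offset aliases
monthly = toy.resample('M', on='funded_date')['loan_amount'].mean()
weekly = toy.resample('7D', on='funded_date')['loan_amount'].mean()
print(monthly.head())
print(weekly.head())
```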
github_jupyter
from datetime import datetime import dateutil.parser import re import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # The command below means that the output of multiple commands in a cell will be output at once from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # The command below tells jupyter to display up to 80 columns, this keeps everything visible pd.set_option('display.max_columns', 80) pd.set_option('expand_frame_repr', True) sns.set_palette("hls") %matplotlib inline path = '../data/' filename = 'loans.csv' df = pd.read_csv(path+filename) df.head() numeric_vars = df.select_dtypes(include=[np.number]).columns.tolist() for variable in numeric_vars: print(variable) ax = sns.regplot(x='lender_count', y='loan_amount', data=df) ax = sns.regplot(x='repayment_term', y='loan_amount', data=df) num_df # Let's choose only a couple of columns to examine: columns = ['loan_amount', 'funded_amount', 'status'] num_df = df[columns] # Remove the NaN rows so Seaborn can plot num_df = num_df.dropna(axis=0, how='any') # Create the scatter plot and let's color the data point by their status. sns.pairplot(num_df, hue='status'); sns.distplot(df['loan_amount'].dropna(axis = 0)); # Let's just look at those under 5K small_loans_df = df[(df['loan_amount'] < 5000)] sns.distplot(small_loans_df['loan_amount']); p = sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.mean); p.set(title='Average loan amount by sector') p.set_xticklabels(p.get_xticklabels(), rotation=-45); p = sns.barplot(x='sector', y = 'loan_amount', data=df, estimator=np.sum); p.set(title='Total loan amount by sector') p.set_xticklabels(p.get_xticklabels(), rotation=-45); df_retail = df[df.sector=='Retail'] df_retail.head() sector = 'Retail' df_retail = df[df.sector==sector] p = sns.boxplot(x='sector', y='loan_amount', data=df_retail); p.set(title = f'Loan amounts for {sector}'); p.set_xticklabels(p.get_xticklabels(), rotation=-45); # Convert posted date to a datetime object time_column = 'funded_date' df[time_column] = pd.to_datetime(df[time_column]) # Resample the date to monthly intervals , taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('M', on=time_column)['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time'); # Resample the date to monthly intervals , taking the mean of loan_amount # This creates an array where the index is the timestamp and the value is the mean of loan amount time_data = df.resample('7D', on=time_column)['loan_amount'].mean().fillna(0) fig, ax = plt.subplots(figsize=(15,8)) ax.plot(time_data) plt.title('Mean loan_amount over time');
0.639511
0.986284
# Conditional actions * **Difficulty level**: easy * **Time need to lean**: 10 minutes or less * **Key points**: * Normal `break`, `continue`, `return` structures cannot be used in the implicit loops of substeps * Action `warn_if` gives an warning under specified conditions * Action `fail_if` raises an exception that terminates the substep and therefore the entire workflow if a condition is met * Action `done_if` assumes that the substep is completed and ignores the rest of the statements * Action `skip_if` skips the substep and removed `_output` even if the `_output` has been generated ## Control structures of substeps ``` # create a few input files for examples !touch a_0.txt a_1.txt a_2.txt a_3.txt ``` SoS allows the use of arbitrary Python statements in step processes. For example, suppose you are processing a number of input files and some of them contain errors and have to be ignored, you can write a workflow step as follows: ``` infiles = [f'a_{i}.txt' for i in range(4)] outfiles = [] for idx, infile in enumerate(infiles): if idx == 2: # problematic step continue out = f'a_{idx}.out' sh(f'echo generating {out}\ntouch {out}') outfiles.append(out) ``` However, as we have discussed in tutorials [How to include scripts in different langauges in SoS workflows](script_format.html) and [How to specify input and output files and process input files in groups](input_substeps.html), steps written with loops and function calls like `sh()` are not very readable because the scripts are not clearly presented and users have to follow the logics of the code. Also, the input files are not processed in parallel so the step is not executed efficiently. The more SoS way to implement the step is to use input and output statements and script format of function calls as follows: ``` input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} ``` The problem is that substeps are processed concurrently and we do not yet have a way to treat them differentially and introduce the logic of ``` if idx == 2: # problematic step continue ``` ## Action `skip_if` <div class="bs-callout bs-callout-primary" role="alert"> <h4>Action <code>skip_if(expr, msg)</code></h4> <p>Action <code>skip_if(expr, msg)</code> skips the execution of the substep if condition <code>expr</code> is met. It also assume that the substep generates no output and set <code>_output</code> to empty. The usage pattern of <code>skip_if</code> is</p> <pre> output: ... skip_if(...) statements to produce _output </pre> </div> The `skip_if` action allows you to skip certain substeps with certain condition. The condition can involve a (mostly) hidden variable `_index` which is the index of the substep. For example, the aforementioned step can be written as ``` input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') skip_if(_index == 2, 'input 2 has some problem') sh: expand=True echo generating {_output} touch {_output} ``` It is important to remember that `skip_if` assumes that substep output is not generated and adjust `_output` accordingly. For example, if you pass the output of the step to another step, you will notice that the output of step `2` is empty. 
``` %run -v0 [10] input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') skip_if(_index == 2, 'input 2 has some problem') sh: expand=True echo generating {_output} touch {_output} [20] print(f'Input of {_index} is {_input}') ``` ## Action `done_if` <div class="bs-callout bs-callout-primary" role="alert"> <h4>Action <code>done_if(expr, msg)</code></h4> <p>Action <code>done_if(expr, msg)</code> ignores the rest of the step process, assuming that the substep has been completed with output generated. The usage pattern of <code>done_if</code> is</p> <pre> output: ... statements to produce _output done_if(...) additional statements </pre> </div> A similar action is `done_if`, which also ignores the rest of the step process but assumes that the output has already been generated. Consequently, this action does not adjust `_output`. For example, if some more work is only applied to a subset of substeps, you can use `done_if` to execute additional code to only selected substeps. ``` %run -v0 [10] input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} done_if(_index != 2, 'input 2 need to be fixed') sh: expand=True echo "Fixing {_output}" [20] print(f'Input of {_index} is {_input}') ``` ## Action `warn_if` <div class="bs-callout bs-callout-primary" role="alert"> <h4>Action <code>warn_if(expr, msg)</code></h4> <p>Action <code>warn_if(expr, msg)</code> gives an warning if a specified condition is met.</p> </div> Action `warn_if` is very easy to use. It just produces an warning message if something suspicious is detected. ``` input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} warn_if(_index == 2, 'input 2 might be problematic') ``` ## Action `fail_if` <div class="bs-callout bs-callout-primary" role="alert"> <h4>Action <code>fail_if(expr, msg)</code></h4> <p>Action <code>fail_if(expr, msg)</code> terminates the execution of workflow if a condition is met.</p> </div> Action `fail_if` terminates the execution of the workflow under certain conditions. It kills all other processes (e.g. working substeps or nested workflows) and it should be used with caution if is unsafe to terminate the workflow abruptly. For example, if we decide to terminate the entire workflow if we detect something wrong with an input file, we can do ``` %env --expect-error input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} fail_if(_index == 2, 'input 2 might be problematic') ```
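For readers who do not use SoS, the four actions roughly map onto ordinary Python control flow inside an explicit loop over inputs. The sketch below is plain Python (it does not run under SoS, and the `process` helper is purely illustrative): `continue` plays the role of `skip_if`, `warnings.warn` of `warn_if`, and raising an exception of `fail_if`; `done_if` would correspond to returning early from the substep body once its output exists.

```
import warnings
from pathlib import Path

def process(infiles, fail_on=None):
    outputs = []
    for idx, infile in enumerate(infiles):
        if fail_on is not None and idx == fail_on:
            raise RuntimeError(f'input {idx} might be problematic')   # ~ fail_if
        if idx == 2:
            print(f'skipping {infile}: input 2 has some problem')     # ~ skip_if: no output
            continue
        if idx == 3:
            warnings.warn(f'{infile} might be problematic')           # ~ warn_if: keep going
        out = Path(infile).with_suffix('.out')
        print(f'generating {out}')
        outputs.append(out)                                           # the substep's _output
    return outputs

process([f'a_{i}.txt' for i in range(4)])
```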
github_jupyter
# create a few input files for examples !touch a_0.txt a_1.txt a_2.txt a_3.txt infiles = [f'a_{i}.txt' for i in range(4)] outfiles = [] for idx, infile in enumerate(infiles): if idx == 2: # problematic step continue out = f'a_{idx}.out' sh(f'echo generating {out}\ntouch {out}') outfiles.append(out) input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} if idx == 2: # problematic step continue input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') skip_if(_index == 2, 'input 2 has some problem') sh: expand=True echo generating {_output} touch {_output} %run -v0 [10] input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') skip_if(_index == 2, 'input 2 has some problem') sh: expand=True echo generating {_output} touch {_output} [20] print(f'Input of {_index} is {_input}') %run -v0 [10] input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} done_if(_index != 2, 'input 2 need to be fixed') sh: expand=True echo "Fixing {_output}" [20] print(f'Input of {_index} is {_input}') input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} warn_if(_index == 2, 'input 2 might be problematic') %env --expect-error input: [f'a_{i}.txt' for i in range(4)], group_by=1 output: _input.with_suffix('.out') sh: expand=True echo generating {_output} touch {_output} fail_if(_index == 2, 'input 2 might be problematic')
0.308919
0.912475
<img src="images/logodwengo.png" alt="Banner" width="150"/> <div> <font color=#690027 markdown="1"> <h1>REKENEN</h1> </font> </div> <div class="alert alert-box alert-success"> In Python kan je ook rekenkundige bewerkingen maken. Python voorziet daartoe een aantal operatoren. <br> Soms zal het ook nodig zijn een module te importeren die meer mogelijkheden biedt dan de standaardoperatoren. </div> <div> <font color=#690027 markdown="1"> <h2>1. Van wiskunde naar Python: rekenkundige operatoren</h2> </font> </div> <table> <thead> <tr> <th>&nbsp;</th> <th><p align="center">Wiskunde</th> <th>&nbsp;</th> <th><p align="center">Python</th> <th>&nbsp;</th> </thead> <tr> <td> <p align="left">plus <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; + <td> <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; + <td> <p align="left">&nbsp;&nbsp;&nbsp; kan ook toegepast worden op strings <tr> <td> <p align="left">min <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; - <td> <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; - <td> <tr> <td> <p align="left">maal <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; . <td> <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; * <td> <p align="left">&nbsp;&nbsp;&nbsp; kan ook toegepast worden op strings <tr> <td> <p align="left">gedeeld door <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; / <td> <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; / <td> <tr> <td> <p align="left">gehele deling &nbsp;&nbsp; <td> <td> <td> <p align="center">&nbsp;&nbsp;&nbsp;&nbsp; // <td> <p align="left">&nbsp;&nbsp;&nbsp; geeft geheel deel van quotiënt (afgerond naar beneden) <tr> <td> <p align="left">modulo <td> <p align="center">&nbsp;&nbsp; mod <td> <td> <p align="center">&nbsp;&nbsp;&nbsp; % <td> <p align="left">&nbsp;&nbsp;&nbsp; geeft rest na deling <tr> <td> <p align="left">macht <td> <p align="center">&nbsp;&nbsp;&nbsp; $a^b$ <td> <td> <p align="center">&nbsp;&nbsp;&nbsp; a**b <td> </table> ### Opdracht 1.1 Verschillende operatoren Er wordt telkens een bewerking gegeven in de notatie die je gewoon bent uit de wiskundeles.<br> Bedenk zelf de uitkomst van de bewerking én controleer door de bewerking via een Python-instructie, die je ingeeft in de code-cel, uit te voeren. $2+7\cdot8 =$ ``` 2 + 7 * 8 ``` $ (2 + 7)\cdot 8 =$ $ 2^5 =$ $ -3^2 = $ $17$ mod $4 =$ $ 15 \; / \; 2 = $ $ 12 \; / \; 3 = $ <div class="alert alert-box alert-info"> 32 heeft het type <b>integer (int)</b>.<br> Een decimaal getal zoals 7,5 heeft het type <b>floating-point number (float)</b>. </div> ``` # type van 32 type(32) ``` ### Opdracht 1.2 Geef de instructie in om het type dat 7.5 heeft, op te vragen. ``` # type van 7.5 ``` ### Verschillende operatoren voor een deling in Python 'Gedeeld door' kan je in Python zowel met de operator `/` als de operator `//` laten uitvoeren.<br> **Wat is nu juist het verschil tussen de operator `/` en de operator `//`?** ### Opdracht 1.3 Het verschil tussen / en // Voer telkens de instructie in de code-cel uit en vergelijk de uitvoer van de verschillende instructies. ``` 9 / 2 9.0 / 2.0 9.0 / 2 9 / 2.0 9 // 2 9.0 // 2 ``` Begrijp je nu het verschil tussen `/` en `//` ? <div> <font color=#690027 markdown="1"> <h2>2. De standaardmodule math</h2> </font> </div> <div class="alert alert-block alert-success"> In een <b>module</b> zitten heel wat functies vervat die ervaren informatici reeds voor jou hebben geprogrammeerd. Zo wordt het gebruik van Python zeer toegankelijk. Om een module te kunnen gebruiken, moet je die eerst <b>importeren</b>. 
</div> Vaak zijn deze eenvoudige rekenkundige bewerkingen niet voldoende. Soms wil je bv. een vierkantswortel berekenen.<br> In de *module* math zijn een heleboel wiskundige functies voorzien. Door de module te importeren, kan je gebruikmaken van al deze functies. <br> Importeren doe je door de volgende instructie uit te voeren. ``` import math # module importeren ``` <div class="alert alert-block alert-success"> Met <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;"><b>#</b></span> kan je commentaar toevoegen die verduidelijkt wat de code doet. Commentaar maakt de code gemakkelijker leesbaar. <br> Bij het uitvoeren van de code wordt de commentaar door Python overgeslagen. </div> Behalve met vierkantswortels, kan je nu bv. ook met het getal $\pi$ en met goniometrische functies aan de slag. Test eens uit. ``` # constante pi wordt opgeroepen uit module 'math' en wordt afgedrukt print(math.pi) # functie sqrt() wordt opgeroepen uit module 'math' # vierkantswortel van 16 wordt berekend en afgedrukt print(math.sqrt(16)) ``` Je kan het ook als volgt doen. Je hoeft dan de naam van de module niet meer te vermelden om een functie of een constante op te roepen. ``` from math import sqrt, pi # nodige constante en functies importeren uit module math # ............. print(pi) # ............. print(sqrt(2)) ``` <div class="alert alert-block alert-info"> <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">sqrt()</span> is een functie van de module math. </div> ### Opdracht 2.1 Vul de commentaar in de vorige code-cel aan. <div> <font color=#690027 markdown="1"> <h2>3. Oefeningen</h2> </font> </div> Los volgende oefeningen op door een instructie te schrijven in de voorziene code-cellen. Voer die dan ook uit! 1) Hoeveel boeketten van € 53 kan men kopen met € 2564? Antwoord: 2) Bereken de inhoud van een bol met straal 7 cm. Controleer met je GRM. Antwoord: 3) Tina heeft deelgenomen aan een loopwedstrijd van 6 mijl in Washington. Ze legde de afstand af in 62 minuten en 20 seconden. Aan welke snelheid, uitgedrukt in km/h, liep Tina gemiddeld? (1 mijl is gelijk aan 1,61 km.) Antwoord: 4) Hoeveel combinaties kan je maken met een 0 of een 1 op 8 plaatsen **. . . . . . . .** , zoals 01100100? Antwoord: <div class="alert alert-block alert-info"> Een <b>bit</b> is een informatie-eenheid. De term is afkomstig van <b>binary digit</b>. Het is een eenheid die enkel de waarden 0 en 1 kan aannemen. <br>Acht bits vormen samen een <b>byte</b>. </div> <div class="alert alert-block alert-danger"> Opmerking: code die <b>uitgevoerd</b> werd in de notebook, blijft <b>onthouden</b>. Als je dus ergens een fout hebt gemaakt tijdens het programmeren en je bent niet zeker dat alle variabelen correct zijn onthouden, herstart dan eens de notebook door gebruik te maken van de knoppen in de werkbalk: <b>Kernel</b> > <b>Restart & Clear Output</b>. </div> <img src="images/cclic.png" alt="Banner" align="left" width="100"/><br><br> Notebook Python in wiskunde, zie Computationeel denken - Programmeren in Python van <a href="http://www.aiopschool.be">AI Op School</a>, van F. wyffels, B. Van de Velde & N. Gesquière is in licentie gegeven volgens een <a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>.
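To recap sections 1 and 2 in a single runnable cell, the short sketch below collects the arithmetic operators and the math-module calls introduced above; the printed values follow directly from standard Python behaviour (the numbers are deliberately different from the exercises, so they do not give the answers away).

```
import math  # import the math module

# integer vs. float division, remainder and powers
print(15 / 4)    # 3.75 -> true division always returns a float
print(15 // 4)   # 3    -> floor division keeps only the integer part
print(15 % 4)    # 3    -> modulo gives the remainder after division
print(3 ** 4)    # 81   -> exponentiation

# + and * also work on strings
print('ha' * 3)  # 'hahaha'

# constants and functions from the math module
print(math.pi)        # 3.141592653589793
print(math.sqrt(25))  # 5.0
```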
github_jupyter
2 + 7 * 8 # type van 32 type(32) # type van 7.5 9 / 2 9.0 / 2.0 9.0 / 2 9 / 2.0 9 // 2 9.0 // 2 import math # module importeren # constante pi wordt opgeroepen uit module 'math' en wordt afgedrukt print(math.pi) # functie sqrt() wordt opgeroepen uit module 'math' # vierkantswortel van 16 wordt berekend en afgedrukt print(math.sqrt(16)) from math import sqrt, pi # nodige constante en functies importeren uit module math # ............. print(pi) # ............. print(sqrt(2))
0.218336
0.898411
<a href="https://colab.research.google.com/github/ZauggGroup/DeePiCt/blob/main/DeePiCt_predict2d.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # DeePiCt 2D U-Net segmentation This Colab notebook can be used to create predictions with already trained 2D models for cytosol and organelle prediction. The tomogram that you want to use for prediction should be available somewhere online, for example on Drive. The initial step of the spectrum matching filter is not included in the notebook, and you should execute it beforehand. For more details about the model, follow the instructions available on the [DeePiCt Github repository](https://github.com/ZauggGroup/DeePiCt/blob/main/README.md). ## Instructions: * This notebook includes 4 steps to segment the tomogram and optional step 5 for visualization of the result. * Make sure that the tomogram with applied filter is available on an online share, example Google Drive. * Run the cells in the order that they are displayed. To run a cell, you need to click the play button on the left corner of the cell. * Some cells contain parameters that need to be defined, so make sure you enter all the required information correctly before running the particular cell. You have to run the cell, so that the parameter value is saved. # Configurations ___ ### Make sure you have GPU access enabled by going to Runtime -> Change Runtime Type -> Hardware accelerator and selecting GPU ## Step 1. Installations ``` #@markdown ## 1.1. Run this cell to connect your Google Drive to colab #@markdown * Click on the URL. #@markdown * Sign in your Google Account. #@markdown You will either have to: #@markdown * copy the authorisation code and enter it into box below OR #@markdown * in the new google colab, you can just click "Allow" and it should connect. #@markdown * Click on "Folder" icon on the Left, press the refresh button. Your Google Drive folder should now be available here as "gdrive". # mount user's Google Drive to Google Colab. from google.colab import drive drive.mount('/content/gdrive') #@markdown ## 1.2. Run this cell to install necessary packages #@markdown The code in this cell: #@markdown * Gets the git repository of DeePiCt !git clone https://github.com/ZauggGroup/DeePiCt.git #@markdown * Installs required packages !pip install mrcfile !pip install h5py==2.10.0 !pip install tensorflow-gpu==2.0.0 !pip install keras==2.3.1 ``` ## Step 2. Set the data variables and config file ___ ``` #@markdown ## 2.1. Choose the model based on what you want to segment. The available models are prediction for cytosol and organelle. # Define the variable: predict_type = "cytosol" #@param ["cytosol","organelles"] models_weights = {"cytosol": "https://www.dropbox.com/sh/oavbtcvusi07xbh/AAAI0DrqdCOVKeCLjf0EcdBva/2d_cnn/vpp_model_cytosol_eq.h5?dl=0", "organelles": "https://www.dropbox.com/sh/oavbtcvusi07xbh/AAA2DxQVSKqIygfHa51mdM30a/2d_cnn/vpp_model_organelles_eq.h5?dl=0"} !wget -O model_weights.h5 {models_weights[predict_type]} from posixpath import split #@markdown ## 2.2. 
Define important variables #@markdown ### Define the following information in the given variables: srcdir = '/content/DeePiCt/2d_cnn' original_config_file = '/content/DeePiCt/2d_cnn/config.yaml' model_path = '/content/model_weights.h5' # Define the folowing variables: # @markdown * **ID/name for the tomogram**: tomo_name = '180426_005' #@param {type:"string"} # @markdown * **Path to the tomogram .mrc file**: tomogram_path = '/content/gdrive/MyDrive/tomo_data/match_spectrum_filt.mrc' #@param {type:"string"} # @markdown * **Use n/2 slices above and below z center. If 0, select all labeled slices**: z_cutoff = 0 #@param {type:"integer"} #@markdown ### The following variables you don't need to change: # @markdown * *Path where the config .yaml file will be saved (you can leave the default option)*: user_config_file = '/content/gdrive/MyDrive/DeePiCt_2d/config.yaml' #@param {type:"string"} # @markdown * *Path where the data .csv file will be saved (you can leave the default option)*: user_data_file = '/content/gdrive/MyDrive/DeePiCt_2d/data.csv' #@param {type:"string"} # @markdown * *Path to folder where the prediction files will be saved (you can leave the default option)*: user_prediction_folder = '/content/gdrive/MyDrive/DeePiCt_2d/predictions/' #@param {type:"string"} import os os.makedirs(os.path.split(user_config_file)[0], exist_ok=True) os.makedirs(os.path.split(user_data_file)[0], exist_ok=True) os.makedirs(os.path.split(user_prediction_folder)[0], exist_ok=True) if z_cutoff == 0: z_cutoff = None #@markdown ## 2.3. Create data csv file and yaml config file #@markdown Run this cell to create the .csv data file and .yaml config file import csv import yaml header = ['tomo_name','id','data','filtered_data'] # Define the elements of this list: data = [tomo_name, tomo_name,'', tomogram_path] with open(user_data_file, 'w', encoding='UTF8') as f: writer = csv.writer(f) # write the header writer.writerow(header) # write the data writer.writerow(data) data_dictionary = dict(zip(header, data)) def read_yaml(file_path): with open(file_path, "r") as stream: data = yaml.safe_load(stream) return data def save_yaml(data, file_path): with open(file_path, 'w') as yaml_file: yaml.dump(data, yaml_file, default_flow_style=False) d = read_yaml(original_config_file) d['prediction_data'] = user_data_file d['output_dir'] = user_prediction_folder d['preprocessing']['filtering']['active'] = False d['preprocessing']['filtering']['target_spectrum'] = '' d['preprocessing']['filtering']['lowpass_cutoff'] = 350 d['preprocessing']['filtering']['smoothen_cutoff'] = 20 d['preprocessing']['slicing']['z_cutoff'] = z_cutoff d['training']['evaluation']['active'] = False d['training']['production']['active'] = False d['prediction']['active'] = True d['prediction']['model'] = model_path save_yaml(d, user_config_file) ``` ## Step 3. Predict with trained neural network ___ ``` #@markdown ## 3.1. Segment the tomogram #@markdown Run this cell to create the segmentation import os prediction = os.path.join(user_prediction_folder, data_dictionary['id'] + "_pred.mrc") !python /content/DeePiCt/2d_cnn/scripts/predict_organelles.py \ --features {data_dictionary['filtered_data']} \ --output {prediction} \ --model {model_path} \ --config {user_config_file} ``` ## Step 4. Post-processing of the prediction ___ ``` #@markdown ## 4.1. 
Post-processing of the prediction #@markdown Run this cell to do post-processing of the prediction import os post_processed_prediction = os.path.join(user_prediction_folder, data_dictionary['id'] + "_post_processed_pred.mrc") !python3 /content/DeePiCt/2d_cnn/scripts/postprocess.py \ --input {prediction} \ --output {post_processed_prediction} \ --config {user_config_file} ``` # Step 5. Visualize results ___ ``` #@markdown ## 5.1. Read the tomogram and the prediction #@markdown Run this cell to read the tomogram and the predictions import mrcfile def read_tomogram(filename): with mrcfile.open(filename, permissive=True) as m: return m.data tomogram = read_tomogram(data_dictionary['filtered_data']) predictions = read_tomogram(post_processed_prediction) #@markdown ## 5.2. Visualize the prediction #@markdown Run this cell to do visualization of particular z axis z_axis = 100 #@param {type:"integer"} import numpy as np import matplotlib.pyplot as plt # First figure plt.figure(figsize = (10,10)) plt.imshow(tomogram[z_axis], cmap='gray') # Second figure plt.figure(figsize = (10,10)) plt.imshow(tomogram[z_axis], cmap='gray') alphas = np.zeros(predictions.shape) alphas[predictions > 0] = 0.8 plt.imshow(predictions[z_axis], alpha=alphas[z_axis], cmap='Blues') ```
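As an optional check after Step 5.1, the sketch below verifies that the post-processed prediction covers the same volume as the input tomogram before you start browsing slices; it only assumes the `tomogram` and `predictions` arrays already loaded above.

```
# Optional sanity check: prediction and tomogram should have the same shape
print('tomogram shape:  ', tomogram.shape)
print('prediction shape:', predictions.shape)

if tomogram.shape != predictions.shape:
    print('Warning: shapes differ - check the paths defined in step 2.2')
else:
    # fraction of voxels assigned to the predicted class
    print('segmented fraction:', float((predictions > 0).mean()))
```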
github_jupyter
#@markdown ## 1.1. Run this cell to connect your Google Drive to colab #@markdown * Click on the URL. #@markdown * Sign in your Google Account. #@markdown You will either have to: #@markdown * copy the authorisation code and enter it into box below OR #@markdown * in the new google colab, you can just click "Allow" and it should connect. #@markdown * Click on "Folder" icon on the Left, press the refresh button. Your Google Drive folder should now be available here as "gdrive". # mount user's Google Drive to Google Colab. from google.colab import drive drive.mount('/content/gdrive') #@markdown ## 1.2. Run this cell to install necessary packages #@markdown The code in this cell: #@markdown * Gets the git repository of DeePiCt !git clone https://github.com/ZauggGroup/DeePiCt.git #@markdown * Installs required packages !pip install mrcfile !pip install h5py==2.10.0 !pip install tensorflow-gpu==2.0.0 !pip install keras==2.3.1 #@markdown ## 2.1. Choose the model based on what you want to segment. The available models are prediction for cytosol and organelle. # Define the variable: predict_type = "cytosol" #@param ["cytosol","organelles"] models_weights = {"cytosol": "https://www.dropbox.com/sh/oavbtcvusi07xbh/AAAI0DrqdCOVKeCLjf0EcdBva/2d_cnn/vpp_model_cytosol_eq.h5?dl=0", "organelles": "https://www.dropbox.com/sh/oavbtcvusi07xbh/AAA2DxQVSKqIygfHa51mdM30a/2d_cnn/vpp_model_organelles_eq.h5?dl=0"} !wget -O model_weights.h5 {models_weights[predict_type]} from posixpath import split #@markdown ## 2.2. Define important variables #@markdown ### Define the following information in the given variables: srcdir = '/content/DeePiCt/2d_cnn' original_config_file = '/content/DeePiCt/2d_cnn/config.yaml' model_path = '/content/model_weights.h5' # Define the folowing variables: # @markdown * **ID/name for the tomogram**: tomo_name = '180426_005' #@param {type:"string"} # @markdown * **Path to the tomogram .mrc file**: tomogram_path = '/content/gdrive/MyDrive/tomo_data/match_spectrum_filt.mrc' #@param {type:"string"} # @markdown * **Use n/2 slices above and below z center. If 0, select all labeled slices**: z_cutoff = 0 #@param {type:"integer"} #@markdown ### The following variables you don't need to change: # @markdown * *Path where the config .yaml file will be saved (you can leave the default option)*: user_config_file = '/content/gdrive/MyDrive/DeePiCt_2d/config.yaml' #@param {type:"string"} # @markdown * *Path where the data .csv file will be saved (you can leave the default option)*: user_data_file = '/content/gdrive/MyDrive/DeePiCt_2d/data.csv' #@param {type:"string"} # @markdown * *Path to folder where the prediction files will be saved (you can leave the default option)*: user_prediction_folder = '/content/gdrive/MyDrive/DeePiCt_2d/predictions/' #@param {type:"string"} import os os.makedirs(os.path.split(user_config_file)[0], exist_ok=True) os.makedirs(os.path.split(user_data_file)[0], exist_ok=True) os.makedirs(os.path.split(user_prediction_folder)[0], exist_ok=True) if z_cutoff == 0: z_cutoff = None #@markdown ## 2.3. 
Create data csv file and yaml config file #@markdown Run this cell to create the .csv data file and .yaml config file import csv import yaml header = ['tomo_name','id','data','filtered_data'] # Define the elements of this list: data = [tomo_name, tomo_name,'', tomogram_path] with open(user_data_file, 'w', encoding='UTF8') as f: writer = csv.writer(f) # write the header writer.writerow(header) # write the data writer.writerow(data) data_dictionary = dict(zip(header, data)) def read_yaml(file_path): with open(file_path, "r") as stream: data = yaml.safe_load(stream) return data def save_yaml(data, file_path): with open(file_path, 'w') as yaml_file: yaml.dump(data, yaml_file, default_flow_style=False) d = read_yaml(original_config_file) d['prediction_data'] = user_data_file d['output_dir'] = user_prediction_folder d['preprocessing']['filtering']['active'] = False d['preprocessing']['filtering']['target_spectrum'] = '' d['preprocessing']['filtering']['lowpass_cutoff'] = 350 d['preprocessing']['filtering']['smoothen_cutoff'] = 20 d['preprocessing']['slicing']['z_cutoff'] = z_cutoff d['training']['evaluation']['active'] = False d['training']['production']['active'] = False d['prediction']['active'] = True d['prediction']['model'] = model_path save_yaml(d, user_config_file) #@markdown ## 3.1. Segment the tomogram #@markdown Run this cell to create the segmentation import os prediction = os.path.join(user_prediction_folder, data_dictionary['id'] + "_pred.mrc") !python /content/DeePiCt/2d_cnn/scripts/predict_organelles.py \ --features {data_dictionary['filtered_data']} \ --output {prediction} \ --model {model_path} \ --config {user_config_file} #@markdown ## 4.1. Post-processing of the prediction #@markdown Run this cell to do post-processing of the prediction import os post_processed_prediction = os.path.join(user_prediction_folder, data_dictionary['id'] + "_post_processed_pred.mrc") !python3 /content/DeePiCt/2d_cnn/scripts/postprocess.py \ --input {prediction} \ --output {post_processed_prediction} \ --config {user_config_file} #@markdown ## 5.1. Read the tomogram and the prediction #@markdown Run this cell to read the tomogram and the predictions import mrcfile def read_tomogram(filename): with mrcfile.open(filename, permissive=True) as m: return m.data tomogram = read_tomogram(data_dictionary['filtered_data']) predictions = read_tomogram(post_processed_prediction) #@markdown ## 5.2. Visualize the prediction #@markdown Run this cell to do visualization of particular z axis z_axis = 100 #@param {type:"integer"} import numpy as np import matplotlib.pyplot as plt # First figure plt.figure(figsize = (10,10)) plt.imshow(tomogram[z_axis], cmap='gray') # Second figure plt.figure(figsize = (10,10)) plt.imshow(tomogram[z_axis], cmap='gray') alphas = np.zeros(predictions.shape) alphas[predictions > 0] = 0.8 plt.imshow(predictions[z_axis], alpha=alphas[z_axis], cmap='Blues')
0.561936
0.909586
# Importing Data from the Web

## Importing flat files from the web

Now you will learn how to import data from the web. Many websites make datasets available that you can use to practice what we have learnt so far. One of them is the Machine Learning Repository hosted by [the University of California, Irvine](https://archive.ics.uci.edu/ml/datasets.php?format=&task=&att=&area=bus&numAtt=&numIns=&type=&sort=nameUp&view=table)

But why download the data by writing Python code instead of clicking through a web browser? It matters a great deal: if you download the data manually, your project is no longer reproducible, and sharing the project together with its data becomes awkward when the files are large. To deal with this, you can import the data directly in your code through its web link (URL). Let's look at how to do it.

```
# Import package
from urllib.request import urlretrieve
# Import pandas
import pandas as pd

# Assign url of file: url
url = 'https://data.cdc.gov/api/views/cjae-szjv/rows.csv?accessType=DOWNLOAD'

# Save file locally
urlretrieve(url, './datasets/Air_Quality_Measures.csv')

# Read file into a DataFrame and print its head
df = pd.read_csv('./datasets/Air_Quality_Measures.csv', sep=',', header = 0)
df.head()
df.tail()
```

You have just imported a file from the web, saved it locally and loaded it into a DataFrame. If you just want to load a file from the web into a DataFrame without first saving it locally, you can do that easily with pandas. In particular, you can use the function `pd.read_csv()` with the URL as the first argument and the separator `sep` as the second argument. Don't forget to use the first row as the header of the DataFrame.

```
# Import packages
import matplotlib.pyplot as plt

# Assign url of file: url
url = 'https://data.cdc.gov/api/views/cjae-szjv/rows.csv?accessType=DOWNLOAD'

# Read file into a DataFrame: df
df = pd.read_csv(url, sep=',', header = 0)

# Print the head of the DataFrame
df.head()
df.shape

# Plot first column of df
fig = plt.figure(figsize = (28,10))
#ax = fig.gca()
df['StateName'][:100000].hist()
plt.xlabel('value')
plt.ylabel('count')
plt.show()
```

## Importing non-flat files from the web

You'll use `pd.read_excel()` to import an Excel spreadsheet: read in all of its sheets, print the sheet names and then print the head of the first sheet using its name, not its index. Note that the output of `pd.read_excel()` is a Python dictionary with sheet names as keys and the corresponding DataFrames as values.

```
# Assign url of file: url
url = 'http://www.principlesofeconometrics.com/excel/gold.xls'

# Read in all sheets of Excel file: xl
xl = pd.read_excel(url, sheetname = None)

# Print the sheetnames to the shell
print(xl.keys())

# Print the head of the first sheet (using its name, NOT its index)
xl['Sheet1'].head()
```

## Request HTTP

The requests library is the de facto standard for making HTTP requests in Python. It abstracts the complexities of making requests behind a beautiful, simple API so that you can focus on interacting with services and consuming data in your application. One of the most common HTTP methods is `get()`. The `get()` method indicates that you're trying to get or retrieve data from a specified resource. To make a GET request, invoke requests.get().
To test this out, you can make a `get()` request to GitHub’s Root REST API by calling get() with the following URL ``` import requests response = requests.get('https://2.python-requests.org//es/latest/') # Extract the response: text text = response.text # Print the html print(text) ``` ## Parsing HTML with BeautifulSoup In this interactive exercise, you'll learn how to use the BeautifulSoup package to parse, prettify and extract information from HTML. You'll scrape the data from the webpage of Beautiful Soup, which is the web page of the correspond lybrary. In the following exercises, you'll prettify the HTML and then extract the text and the hyperlinks. The URL of interest is url = 'https://www.crummy.com/software/BeautifulSoup/' ### getting the text ``` from bs4 import BeautifulSoup # Specify url: url url = 'https://www.crummy.com/software/BeautifulSoup/' # Package the request, send the request and catch the response: r r = requests.get(url) # Extracts the response as html: html_doc html_doc = r.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # Prettify the BeautifulSoup object: pretty_soup pretty_soup = soup.prettify() # Print the response print(pretty_soup) # Get the title of Guido's webpage: guido_title BeautifulSoup_title = soup.title # Print the title of Guido's webpage to the shell print(BeautifulSoup_title) # Get Guido's text: guido_text BeautifulSoup_text = soup.get_text() # Print Guido's text to the shell print(BeautifulSoup_text) ``` ### getting the hyperlinks In this exercise, you'll figure out how to extract the URLs of the hyperlinks from the BDFL's webpage. In the process, you'll become close friends with the soup method find_all() ``` # Find all 'a' tags (which define hyperlinks): a_tags a_tags = soup.find_all('a') # Print the URLs to the shell for link in a_tags: print(link.get('href')) ``` ## Introduction to APIs and JSONs ### JSON JavaScript Object Notation (JSON). JSON is a way to encode data structures like lists and dictionaries to strings that ensures that they are easily readable by machines. JSON is the primary format in which data is passed back and forth to APIs, and most API servers will send their responses in JSON format. Python has great JSON support, with the json package. The json package is part of the standard library, so we don’t have to install anything to use it. We can both convert lists and dictionaries to JSON, and convert strings to lists and dictionaries. The json library has two main methods: * dumps — Takes in a Python object, and converts it to a string. * loads — Takes a JSON string, and converts it to a Python object. Moreover, you can get awesome free datasets of the repository hosted [here](https://json-datasets.zeef.com/jdorfman) ``` import json # Make a list of fast food chains. best_food_chains = ["Taco Bell", "Shake Shack", "Chipotle"] # Import the json library print(best_food_chains, "his type is ", type(best_food_chains)) # Use json.dumps to convert best_food_chains to a string. best_food_chains_string = json.dumps(best_food_chains) # We've successfully converted our list to a string. 
print(best_food_chains_string, "his type is ", type(best_food_chains_string)) print("First position of list best_food_chains :",best_food_chains[0]) print("First position of listbest_food_chains_string :",best_food_chains_string[0]) # Convert best_food_chains_string back into a list best_food_chains_string_list = json.loads(best_food_chains_string) print(best_food_chains_string_list, "his type is ", type(best_food_chains_string_list)) # Make a dictionary fast_food_franchise = { "Subway": 24722, "McDonalds": 14098, "Starbucks": 10821, "Pizza Hut": 7600 } # We can also dump a dictionary to a string and load it. fast_food_franchise_string = json.dumps(fast_food_franchise) print(fast_food_franchise_string) ``` ### Open JSON files you'll load one into your Python environment and explore it yourself. Here, you'll load the JSON **ferrari.json** into the variable ferrari, which will be a dictionary. You'll then explore the JSON contents by printing the key-value pairs of json_data to the shell. ``` import json # Load JSON: ferrari with open("./datasets/ferrari.json") as file: ferrari = json.load(file) tamaño = len(ferrari['MODELS']) modelos=[] for i in range(tamaño): dictonario = ferrari['MODELS'][i] modelos.append(dictonario["model_name"]) ferrari_dict = { "modelos": modelos} pd_ferrari = pd.DataFrame(ferrari_dict) pd_ferrari.head() ``` Exercise: Try to do the same process with other dataset obtained of https://json-datasets.zeef.com/jdorfman ### API: Application Programming Interface API is a is a software intermediary that allows two applications to talk to each other. Each time you use an app like Facebook, send an instant message, or check the weather on your phone, you’re using an API When you use an application on your mobile phone, the application connects to the Internet and sends data to a server. The server then retrieves that data, interprets it, performs the necessary actions and sends it back to your phone. The application then interprets that data and presents you with the information you wanted in a readable way. This is what an API is - all of this happens via API. To explain this better, let us take a familiar example. Imagine you’re sitting at a table in a restaurant with a menu of choices to order from. The kitchen is the part of the “system” that will prepare your order. What is missing is the critical link to communicate your order to the kitchen and deliver your food back to your table. That’s where the waiter or API comes in. The waiter is the messenger – or API – that takes your request or order and tells the kitchen – the system – what to do. Then the waiter delivers the response back to you; in this case, it is the food. To use an API, you make a request to a remote web server, and retrieve the data you need. But why use an API instead of a static data set you can download? APIs are useful in the following cases: * The data is changing quickly. An example of this is stock price data. It doesn’t really make sense to regenerate a data set and download it every minute — this will take a lot of bandwidth, and be pretty slow. * You want a small piece of a much larger set of data. Reddit comments are one example. What if you want to just pull your own comments on Reddit? It doesn’t make much sense to download the entire Reddit database, then filter just your own comments. * There is repeated computation involved. Spotify has an API that can tell you the genre of a piece of music. 
You could theoretically create your own classifier, and use it to categorize music, but you'll never have as much data as Spotify does.

In cases like the ones above, an API is the right solution.

Right now, we are going to check out [the Wikipedia API](https://www.mediawiki.org/wiki/API:Main_page). You'll figure out how to find and extract information from the Wikipedia page for Colombia. What gets a bit wild here is that your query will return nested JSONs, that is, JSONs within JSONs, but Python can handle that because it will translate them into dictionaries within dictionaries.

```
# Assign URL to variable: url
url = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=Colombia'

# Package the request, send the request and catch the response: r
r = requests.get(url)

# Decode the JSON data into a dictionary: json_data
json_data = r.json()

# Print the Wikipedia page extract
Colombia = json_data['query']['pages']['5222']['extract']
print(Colombia)

# json_data
```

## Web Scraping

We will go through a simple example of how to scrape a website to gather data on the top 100 companies in 2018 from Fast Track. Automating this process with a web scraper avoids manual data gathering, saves time and also gives you all the data on the companies in one structured file.

First, to know which elements you need to target in your Python code, you have to inspect the web page. To gather data from [Tech Track Top 100 companies](https://www.fasttrack.co.uk/league-tables/tech-track-100/league-table/?source=post_page---------------------------) you can inspect the page by right-clicking on the element of interest and selecting Inspect. This brings up the HTML code, where we can see the element that each field is contained within.
``` import csv # specify the url urlpage = 'http://www.fasttrack.co.uk/league-tables/tech-track-100/league-table/' # Package the request, send the request and catch the response: r Ltable = requests.get(urlpage) # Extracts the response as html: html_doc html_doc = Ltable.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # find results within table table = soup.find('table', attrs={'class': 'tableSorter'}) results = table.find_all('tr') print('Number of results', len(results)) results # create and write headers to a list rows = [] rows.append(['Rank', 'Company Name', 'Webpage', 'Description', 'Location', 'Year end', 'Annual sales rise over 3 years', 'Sales £000s', 'Staff', 'Comments']) print(rows) # loop over results for result in results: # find all columns per result data = result.find_all('td') # check that columns have data if len(data) == 0: continue # write columns to variables rank = data[0].getText() company = data[1].getText() location = data[2].getText() yearend = data[3].getText() salesrise = data[4].getText() sales = data[5].getText() staff = data[6].getText() comments = data[7].getText() print('Company is', company) # Company is WonderblyPersonalised children's books print('Sales', sales) # Sales *25,860 # extract description from the name companyname = data[1].find('span', attrs={'class':'company-name'}).getText() description = company.replace(companyname, '') # remove unwanted characters sales = sales.strip('*').strip('†').replace(',','') # go to link and extract company website url = data[1].find('a').get('href') page = requests.get(urlpage).text # parse the html using beautiful soup and store in variable 'soup' soup = BeautifulSoup(page, 'html.parser') # find the last result in the table and get the link try: tableRow = soup.find('table').find_all('tr')[-1] webpage = tableRow.find('a').get('href') except: webpage = None # write each result to rows rows.append([rank, companyname, webpage, description, location, yearend, salesrise, sales, staff, comments]) print(rows) ## Create csv and write rows to output file with open('./datasets/techtrack100.csv','w', newline='') as f_output: csv_output = csv.writer(f_output) csv_output.writerows(rows) ```
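When the data you need already sits in an HTML `<table>`, pandas can often skip the manual parsing entirely. The sketch below applies `pd.read_html()` to the same league-table page; treat it as a sketch only, since the site layout (and the `tableSorter` class used above) may have changed.

```
import pandas as pd

urlpage = 'http://www.fasttrack.co.uk/league-tables/tech-track-100/league-table/'

# read_html returns a list of DataFrames, one per <table> it can parse;
# attrs narrows the search to the table with class="tableSorter"
tables = pd.read_html(urlpage, attrs={'class': 'tableSorter'})
league_table = tables[0]

print(league_table.shape)
league_table.head()
```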
github_jupyter
# Import package from urllib.request import urlretrieve # Import pandas import pandas as pd # Assign url of file: url url = 'https://data.cdc.gov/api/views/cjae-szjv/rows.csv?accessType=DOWNLOAD' # Save file locally urlretrieve(url, './datasets/Air_Quality_Measures.csv') # Read file into a DataFrame and print its head df = pd.read_csv('./datasets/Air_Quality_Measures.csv', sep=',', header = 0) df.head() df.tail() # Import packages import matplotlib.pyplot as plt # Assign url of file: url url = 'https://data.cdc.gov/api/views/cjae-szjv/rows.csv?accessType=DOWNLOAD' # Read file into a DataFrame: df df = pd.read_csv(url, sep=',', header = 0) # Print the head of the DataFrame df.head() df.shape # Plot first column of df fig = plt.figure(figsize = (28,10)) #ax = fig.gca() df['StateName'][:100000].hist() plt.xlabel('value') plt.ylabel('count') plt.show() # Assign url of file: url url = 'http://www.principlesofeconometrics.com/excel/gold.xls' # Read in all sheets of Excel file: xl xl = pd.read_excel(url, sheetname = None) # Print the sheetnames to the shell print(xl.keys()) # Print the head of the first sheet (using its name, NOT its index) xl['Sheet1'].head() import requests response = requests.get('https://2.python-requests.org//es/latest/') # Extract the response: text text = response.text # Print the html print(text) from bs4 import BeautifulSoup # Specify url: url url = 'https://www.crummy.com/software/BeautifulSoup/' # Package the request, send the request and catch the response: r r = requests.get(url) # Extracts the response as html: html_doc html_doc = r.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # Prettify the BeautifulSoup object: pretty_soup pretty_soup = soup.prettify() # Print the response print(pretty_soup) # Get the title of Guido's webpage: guido_title BeautifulSoup_title = soup.title # Print the title of Guido's webpage to the shell print(BeautifulSoup_title) # Get Guido's text: guido_text BeautifulSoup_text = soup.get_text() # Print Guido's text to the shell print(BeautifulSoup_text) # Find all 'a' tags (which define hyperlinks): a_tags a_tags = soup.find_all('a') # Print the URLs to the shell for link in a_tags: print(link.get('href')) import json # Make a list of fast food chains. best_food_chains = ["Taco Bell", "Shake Shack", "Chipotle"] # Import the json library print(best_food_chains, "his type is ", type(best_food_chains)) # Use json.dumps to convert best_food_chains to a string. best_food_chains_string = json.dumps(best_food_chains) # We've successfully converted our list to a string. print(best_food_chains_string, "his type is ", type(best_food_chains_string)) print("First position of list best_food_chains :",best_food_chains[0]) print("First position of listbest_food_chains_string :",best_food_chains_string[0]) # Convert best_food_chains_string back into a list best_food_chains_string_list = json.loads(best_food_chains_string) print(best_food_chains_string_list, "his type is ", type(best_food_chains_string_list)) # Make a dictionary fast_food_franchise = { "Subway": 24722, "McDonalds": 14098, "Starbucks": 10821, "Pizza Hut": 7600 } # We can also dump a dictionary to a string and load it. 
fast_food_franchise_string = json.dumps(fast_food_franchise) print(fast_food_franchise_string) import json # Load JSON: ferrari with open("./datasets/ferrari.json") as file: ferrari = json.load(file) tamaño = len(ferrari['MODELS']) modelos=[] for i in range(tamaño): dictonario = ferrari['MODELS'][i] modelos.append(dictonario["model_name"]) ferrari_dict = { "modelos": modelos} pd_ferrari = pd.DataFrame(ferrari_dict) pd_ferrari.head() # Assign URL to variable: url url = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=Colombia' # Package the request, send the request and catch the response: r r = requests.get(url) # Decode the JSON data into a dictionary: json_data json_data = r.json() # Print the Wikipedia page extract Colombia = json_data['query']['pages']['5222']['extract'] print(Colombia) # json_data import csv # specify the url urlpage = 'http://www.fasttrack.co.uk/league-tables/tech-track-100/league-table/' # Package the request, send the request and catch the response: r Ltable = requests.get(urlpage) # Extracts the response as html: html_doc html_doc = Ltable.text # Create a BeautifulSoup object from the HTML: soup soup = BeautifulSoup(html_doc) # find results within table table = soup.find('table', attrs={'class': 'tableSorter'}) results = table.find_all('tr') print('Number of results', len(results)) results # create and write headers to a list rows = [] rows.append(['Rank', 'Company Name', 'Webpage', 'Description', 'Location', 'Year end', 'Annual sales rise over 3 years', 'Sales £000s', 'Staff', 'Comments']) print(rows) # loop over results for result in results: # find all columns per result data = result.find_all('td') # check that columns have data if len(data) == 0: continue # write columns to variables rank = data[0].getText() company = data[1].getText() location = data[2].getText() yearend = data[3].getText() salesrise = data[4].getText() sales = data[5].getText() staff = data[6].getText() comments = data[7].getText() print('Company is', company) # Company is WonderblyPersonalised children's books print('Sales', sales) # Sales *25,860 # extract description from the name companyname = data[1].find('span', attrs={'class':'company-name'}).getText() description = company.replace(companyname, '') # remove unwanted characters sales = sales.strip('*').strip('†').replace(',','') # go to link and extract company website url = data[1].find('a').get('href') page = requests.get(urlpage).text # parse the html using beautiful soup and store in variable 'soup' soup = BeautifulSoup(page, 'html.parser') # find the last result in the table and get the link try: tableRow = soup.find('table').find_all('tr')[-1] webpage = tableRow.find('a').get('href') except: webpage = None # write each result to rows rows.append([rank, companyname, webpage, description, location, yearend, salesrise, sales, staff, comments]) print(rows) ## Create csv and write rows to output file with open('./datasets/techtrack100.csv','w', newline='') as f_output: csv_output = csv.writer(f_output) csv_output.writerows(rows)
0.467089
0.918845
# Ordinary Differential Equations **Learning Objectives:** Understand the numerical solution of ODEs and use `scipy.integrate.odeint` to solve and explore ODEs numerically. ## Imports ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns ``` ## Overview of ODEs Many of the equations of Physics, Chemistry, Statistics, Data Science, etc. are *Ordinary Differential Equation* or *ODEs*. An ODE is a differential equation with the form: $$ \frac{d\vec{y}}{dt} = \vec{f}\left(\vec{y}(t), t\right) $$ The goal is usually to solve for the $N$ dimensional state vector $\vec{y}(t)$ at each time $t$ given some initial condition: $$ \vec{y}(0) = \vec{y}_0 $$ In this case we are using $t$ as the independent variable, which is common when studying differential equations that depend on time. But any independent variable may be used, such as $x$. Solving an ODE numerically usually involves picking a set of $M$ discrete times at which we wish to know the solution: ``` tmax = 10.0 # The max time M = 100 # Use 100 times between [0,tmax] t = np.linspace(0,tmax,M) t ``` It is useful to define the step size $h$ as: $$ h = t_{i+1} - t_i $$ ``` h = t[1]-t[0] print("h =", h) ``` The numerical solution of an ODE will then be an $M\times N$ array $y_{ij}$ such that: $$ \left[\vec{y}(t_i)\right]_j = y_{ij} $$ In other words, the rows of the array $y_{ij}$ are the state vectors $\vec{y}(t_i)$ at times $t_i$. Here is an array of zeros having the right shape for the values of $N$ and $M$ we are using here: ``` N = 2 # 2d case y = np.zeros((M, N)) print("N =", N) print("M =", M) print("y.shape =", y.shape) ``` A numerical ODE solver takes the `i`th row of this array `y[i,:]` and calculates the `i+1`th row `y[i+1,:]`. This process starts with the initial condition `y[0,:]` and continues through all of the times with steps of size $h$. One of the core ideas of numerical ODE solvers is that the error at each step is proportional to $\mathcal{O}(h^n)$ where $n\geq1$. Because $h<1$ you can reduce the error by making $h$ smaller (up to a point) or finding an ODE solver with a larger value of $n$. Here are some common numerical algorithms for solving ODEs: 1. The [Euler method](http://en.wikipedia.org/wiki/Euler_method), which has an error of $\mathcal{O}(h)$. 2. The [midpoint method](http://en.wikipedia.org/wiki/Midpoint_method), which has an error of $\mathcal{O}(h^2)$. 3. [Runga-Kutta](http://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) methods, the most common (called *RK4*) of which has an error of $\mathcal{O}(h^4)$. Because Runga-Kutta methods are fast and have a small errors, they are one of the most popular general purpose algorithm for solving ODEs. There are many other specialized methods and tricks for solving ODEs (see [this page](http://en.wikipedia.org/wiki/Numerical_methods_for_ordinary_differential_equations)). One of the most common tricks is to use an adaptive step size, which changes the value of $h$ at each step to make sure the error stays below a certain threshold. ## Using `scipy.integrate.odeint` SciPy provides a general purpose ODE solver, `scipy.integrate.odeint`, that can handle a wide variety of linear and non-linear multidimensional ODEs. ``` from scipy.integrate import odeint odeint? 
``` To show how `odeint` works, we will solve the [Lotka–Volterra equations](http://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equation), an example of a *predator-prey* model: $$ \frac{dx}{dt} = \alpha x - \beta x y $$ $$ \frac{dy}{dt} = \delta x y - \gamma y $$ where: * $x(t)$ is the number of prey. * $y(t)$ is the number of predators. * $\alpha$ is the natural birth rate of the prey. * $\gamma$ is the natural death rate of the predators. * $\beta$ determines the death rate of prey when eaten by predators. * $\delta$ determines the growth rate of predators when they eat prey. **It is important to note here that $y(t)$ is different from the overall solutions vector $\vec{y}(t)$. In fact, perhaps confusingly, in this case $\vec{y}(t)=[x(t),y(t)]$.** To integrate this system of differential equations, we must define a function `derivs` that computes the right-hand-side of the differential equation, $\vec{f}(\vec{y}(t), t)$. The signature of this function is set by `odeint` itself: ```python def derivs(yvec, t, *args): ... return dyvec ``` * `yvec` will be a 1d NumPy array with $N$ elements that are the values of the solution at the current time, $\vec{y}(t)$. * `t` will be the current time. * `*args` will be other arguments, typically parameters in the differential equation. The `derivs` function must return a 1d NumPy array with elements that are the values of the function $\vec{f}(\vec{y}(t), t)$. ``` def derivs(yvec, t, alpha, beta, delta, gamma): x = yvec[0] y = yvec[1] dx = alpha*x - beta*x*y dy = delta*x*y - gamma*y return np.array([dx, dy]) ``` Here are the parameters and initial condition we will use to solve the differential equation. In this case, our prey variable $x$ is the number of rabbits and the predator variable $y$ is the number of foxes (foxes eat rabbits). ``` nfoxes = 10 nrabbits = 20 ic = np.array([nrabbits, nfoxes]) maxt = 20.0 alpha = 1.0 beta = 0.1 delta = 0.1 gamma = 1.0 ``` Here we call `odeint` with our `derivs` function, initial condition `ic`, array of times `t` and the extra parameters: ``` t = np.linspace(0, maxt, int(100*maxt)) soln = odeint(derivs, # function to compute the derivatives ic, # array of initial conditions t, # array of times args=(alpha, beta, delta, gamma), # extra args atol=1e-9, rtol=1e-8) # absolute and relative error tolerances ``` We can plot the componenets of the solution as a function of time as follows: ``` plt.plot(t, soln[:,0], label='rabbits') plt.plot(t, soln[:,1], label='foxes') plt.xlabel('t') plt.ylabel('count') plt.legend(); ``` We can also make a parametric plot of $[x(t),y(t)]$: ``` plt.plot(soln[:,0], soln[:,1]) plt.xlim(0, 25) plt.ylim(0, 25) plt.xlabel('rabbits') plt.ylabel('foxes'); ```
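To connect the solver back to the overview above, here is a minimal sketch of the explicit Euler method applied to the same Lotka–Volterra system, reusing the `derivs`, `ic` and `t` defined earlier. With its $\mathcal{O}(h)$ error it drifts away from the `odeint` solution unless the step size is made small.

```
# Explicit Euler: y[i+1] = y[i] + h * f(y[i], t[i])
y_euler = np.zeros((len(t), 2))
y_euler[0] = ic
h = t[1] - t[0]

for i in range(len(t) - 1):
    y_euler[i+1] = y_euler[i] + h * derivs(y_euler[i], t[i], alpha, beta, delta, gamma)

plt.plot(t, soln[:,0], label='rabbits (odeint)')
plt.plot(t, y_euler[:,0], '--', label='rabbits (Euler)')
plt.xlabel('t')
plt.ylabel('count')
plt.legend();
```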
github_jupyter
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns tmax = 10.0 # The max time M = 100 # Use 100 times between [0,tmax] t = np.linspace(0,tmax,M) t h = t[1]-t[0] print("h =", h) N = 2 # 2d case y = np.zeros((M, N)) print("N =", N) print("M =", M) print("y.shape =", y.shape) from scipy.integrate import odeint odeint? def derivs(yvec, t, *args): ... return dyvec def derivs(yvec, t, alpha, beta, delta, gamma): x = yvec[0] y = yvec[1] dx = alpha*x - beta*x*y dy = delta*x*y - gamma*y return np.array([dx, dy]) nfoxes = 10 nrabbits = 20 ic = np.array([nrabbits, nfoxes]) maxt = 20.0 alpha = 1.0 beta = 0.1 delta = 0.1 gamma = 1.0 t = np.linspace(0, maxt, int(100*maxt)) soln = odeint(derivs, # function to compute the derivatives ic, # array of initial conditions t, # array of times args=(alpha, beta, delta, gamma), # extra args atol=1e-9, rtol=1e-8) # absolute and relative error tolerances plt.plot(t, soln[:,0], label='rabbits') plt.plot(t, soln[:,1], label='foxes') plt.xlabel('t') plt.ylabel('count') plt.legend(); plt.plot(soln[:,0], soln[:,1]) plt.xlim(0, 25) plt.ylim(0, 25) plt.xlabel('rabbits') plt.ylabel('foxes');
0.581897
0.985243
``` from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.models import Sequential from tensorflow.keras.optimizers import Adam from tensorflow.keras import regularizers import tensorflow.keras.utils as ku import numpy as np tokenizer = Tokenizer() !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sonnets.txt \ -O /tmp/sonnets.txt data = open('/tmp/sonnets.txt').read() corpus = data.lower().split("\n") tokenizer.fit_on_texts(corpus) total_words = len(tokenizer.word_index) + 1 # create input sequences using list of tokens input_sequences = [] for line in corpus: token_list = tokenizer.texts_to_sequences([line])[0] for i in range(1, len(token_list)): n_gram_sequence = token_list[:i+1] input_sequences.append(n_gram_sequence) # pad sequences max_sequence_len = max([len(x) for x in input_sequences]) input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')) # create predictors and label predictors, label = input_sequences[:,:-1],input_sequences[:,-1] label = ku.to_categorical(label, num_classes=total_words) model = Sequential() model.add(Embedding(total_words, 100, input_length=max_sequence_len-1)) model.add(Bidirectional(LSTM(150, return_sequences=True))) model.add(Dropout(0.2)) model.add(LSTM(100)) model.add(Dense(total_words/2, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(total_words, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) history = model.fit(predictors, label, epochs=100, verbose=1) import matplotlib.pyplot as plt acc = history.history['acc'] loss = history.history['loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'b', label='Training accuracy') plt.title('Training accuracy') plt.figure() plt.plot(epochs, loss, 'b', label='Training Loss') plt.title('Training loss') plt.legend() plt.show() seed_text = "Help me Obi Wan Kenobi, you're my only hope" next_words = 100 for _ in range(next_words): token_list = tokenizer.texts_to_sequences([seed_text])[0] token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre') predicted = model.predict_classes(token_list, verbose=0) output_word = "" for word, index in tokenizer.word_index.items(): if index == predicted: output_word = word break seed_text += " " + output_word print(seed_text) ```
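Note that `Sequential.predict_classes` has been removed in newer TensorFlow releases; if the generation loop above fails for that reason, the sketch below is an equivalent version that takes the argmax of `model.predict` instead (everything else is unchanged).

```
for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    # predict_classes equivalent: index of the highest-probability word
    predicted = np.argmax(model.predict(token_list, verbose=0), axis=-1)[0]
    output_word = ""
    for word, index in tokenizer.word_index.items():
        if index == predicted:
            output_word = word
            break
    seed_text += " " + output_word
print(seed_text)
```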
github_jupyter
from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.models import Sequential from tensorflow.keras.optimizers import Adam from tensorflow.keras import regularizers import tensorflow.keras.utils as ku import numpy as np tokenizer = Tokenizer() !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sonnets.txt \ -O /tmp/sonnets.txt data = open('/tmp/sonnets.txt').read() corpus = data.lower().split("\n") tokenizer.fit_on_texts(corpus) total_words = len(tokenizer.word_index) + 1 # create input sequences using list of tokens input_sequences = [] for line in corpus: token_list = tokenizer.texts_to_sequences([line])[0] for i in range(1, len(token_list)): n_gram_sequence = token_list[:i+1] input_sequences.append(n_gram_sequence) # pad sequences max_sequence_len = max([len(x) for x in input_sequences]) input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')) # create predictors and label predictors, label = input_sequences[:,:-1],input_sequences[:,-1] label = ku.to_categorical(label, num_classes=total_words) model = Sequential() model.add(Embedding(total_words, 100, input_length=max_sequence_len-1)) model.add(Bidirectional(LSTM(150, return_sequences=True))) model.add(Dropout(0.2)) model.add(LSTM(100)) model.add(Dense(total_words/2, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(total_words, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) history = model.fit(predictors, label, epochs=100, verbose=1) import matplotlib.pyplot as plt acc = history.history['acc'] loss = history.history['loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'b', label='Training accuracy') plt.title('Training accuracy') plt.figure() plt.plot(epochs, loss, 'b', label='Training Loss') plt.title('Training loss') plt.legend() plt.show() seed_text = "Help me Obi Wan Kenobi, you're my only hope" next_words = 100 for _ in range(next_words): token_list = tokenizer.texts_to_sequences([seed_text])[0] token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre') predicted = model.predict_classes(token_list, verbose=0) output_word = "" for word, index in tokenizer.word_index.items(): if index == predicted: output_word = word break seed_text += " " + output_word print(seed_text)
0.546254
0.515132
# 서울시 쓰레기 종량제 봉투 최대/최소 비용 구하기 ### 모듈 호출 ``` import json from urllib.parse import urlencode, quote_plus from urllib.request import urlopen, Request import pandas as pd from pandas.core.frame import DataFrame import numpy as np from pandas import json_normalize import requests ``` ### REST API 호출 ``` url = 'http://api.data.go.kr/openapi/tn_pubr_public_weighted_envlp_api' queryParams = '?' + urlencode( {quote_plus('ServiceKey'): 'APIKEY', quote_plus('pageNo'): '1', quote_plus('numOfRows'): '50', quote_plus('type'): 'json', quote_plus('CTPRVN_NM') : '서울특별시' }) request = Request(url + queryParams) request.get_method = lambda: 'GET' response_body = urlopen(request, timeout=60).read() print(response_body) ``` ### API 디코딩 및 JSON 파일 저장 ``` tempDic = json.loads(response_body) data = json.dumps(tempDic, indent=4, ensure_ascii=False) print(data) with open('./TrashPackPrice.json', 'w', encoding='utf-8') as file: file.write(data) ``` ### 데이터 전처리 ``` with open('./TrashPackPrice.json', 'r', encoding='utf-8') as file: json_sample = json.load(file) # 불필요한 정보 제거 json_data = pd.json_normalize(json_sample['response']['body']['items']) json_data = json_data.drop(['chrgDeptNm', 'phoneNumber', 'referenceDate', 'insttCode', 'weightedEnvlpMthd', 'ctprvnNm'], axis=1) json_data = json_data.drop(['price125', 'price120', 'price100'], axis=1) json_data = json_data.drop(['price1', 'price2', 'price3', 'price5', 'price1Half', 'price2Half', 'price75', 'price60', 'price50', 'price30', 'price10'], axis=1) # 사업장용 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '사업장용'].index json_data = json_data.drop(temp_index) # 영업용 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '영업용'].index json_data = json_data.drop(temp_index) # 용도 불문 기타 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '기타'].index json_data = json_data.drop(temp_index) # 20리터 종량제 봉투를 제외한 나머지 봉투 제거 temp_index = json_data[json_data['price20'] == '0'].index json_data = json_data.drop(temp_index) # 음식물 쓰레기 봉투 제거 temp_index = json_data[json_data['weightedEnvlpPrpos'] == '음식물쓰레기'].index json_data = json_data.drop(temp_index) json_data ``` ### 퀵정렬 함수 선언 ``` def partition(list, start, end): pivot = list[start] left = start + 1 right = end done = False while not done: while left <= right and list[left] <= pivot: left += 1 while left <= right and list[right] > pivot: right -= 1 if right < left: done = True else: list[left], list[right] = list[right], list[left] list[start], list[right] = list[right], list[start] # 피봇 교환 return right def QuickSort(list, start, end): stack = [] stack.append(start) stack.append(end) while stack: end = stack.pop() start = stack.pop() pivot = partition(list, start, end) if pivot - 1 > start: stack.append(start) stack.append(pivot - 1) if pivot + 1 < end: stack.append(pivot + 1) stack.append(end) return list ``` ### 데이터프레임값 리스트로 변환 및 정렬 ``` price_list = list(np.array(json_data['price20'].tolist())) price_list = list(map(int, price_list)) price_list = QuickSort(price_list, 0, len(price_list)-1) price_list = list(map(str, price_list)) ``` ### 쓰레기 봉투 최소 값 ``` minPrice = price_list[0] search_index = json_data[json_data['price20'] == minPrice].index json_data.loc[search_index] ``` ### 쓰레기 봉투 최대 값 ``` maxPrice = price_list[len(price_list)-1] search_index = json_data[json_data['price20'] == maxPrice].index json_data.loc[search_index] ```
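Since only the cheapest and most expensive 20-litre bags are needed, a full sort is not strictly required; the short pandas-only sketch below reaches the same two answers directly from the cleaned `json_data` frame.

```
# convert the price column once, then take the extremes without sorting
prices = json_data['price20'].astype(int)

min_rows = json_data[prices == prices.min()]   # cheapest 20-litre bags
max_rows = json_data[prices == prices.max()]   # most expensive 20-litre bags

print(min_rows)
print(max_rows)
```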
github_jupyter
import json from urllib.parse import urlencode, quote_plus from urllib.request import urlopen, Request import pandas as pd from pandas.core.frame import DataFrame import numpy as np from pandas import json_normalize import requests url = 'http://api.data.go.kr/openapi/tn_pubr_public_weighted_envlp_api' queryParams = '?' + urlencode( {quote_plus('ServiceKey'): 'APIKEY', quote_plus('pageNo'): '1', quote_plus('numOfRows'): '50', quote_plus('type'): 'json', quote_plus('CTPRVN_NM') : '서울특별시' }) request = Request(url + queryParams) request.get_method = lambda: 'GET' response_body = urlopen(request, timeout=60).read() print(response_body) tempDic = json.loads(response_body) data = json.dumps(tempDic, indent=4, ensure_ascii=False) print(data) with open('./TrashPackPrice.json', 'w', encoding='utf-8') as file: file.write(data) with open('./TrashPackPrice.json', 'r', encoding='utf-8') as file: json_sample = json.load(file) # 불필요한 정보 제거 json_data = pd.json_normalize(json_sample['response']['body']['items']) json_data = json_data.drop(['chrgDeptNm', 'phoneNumber', 'referenceDate', 'insttCode', 'weightedEnvlpMthd', 'ctprvnNm'], axis=1) json_data = json_data.drop(['price125', 'price120', 'price100'], axis=1) json_data = json_data.drop(['price1', 'price2', 'price3', 'price5', 'price1Half', 'price2Half', 'price75', 'price60', 'price50', 'price30', 'price10'], axis=1) # 사업장용 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '사업장용'].index json_data = json_data.drop(temp_index) # 영업용 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '영업용'].index json_data = json_data.drop(temp_index) # 용도 불문 기타 봉투 제거 temp_index = json_data[json_data['weightedEnvlpTrget'] == '기타'].index json_data = json_data.drop(temp_index) # 20리터 종량제 봉투를 제외한 나머지 봉투 제거 temp_index = json_data[json_data['price20'] == '0'].index json_data = json_data.drop(temp_index) # 음식물 쓰레기 봉투 제거 temp_index = json_data[json_data['weightedEnvlpPrpos'] == '음식물쓰레기'].index json_data = json_data.drop(temp_index) json_data def partition(list, start, end): pivot = list[start] left = start + 1 right = end done = False while not done: while left <= right and list[left] <= pivot: left += 1 while left <= right and list[right] > pivot: right -= 1 if right < left: done = True else: list[left], list[right] = list[right], list[left] list[start], list[right] = list[right], list[start] # 피봇 교환 return right def QuickSort(list, start, end): stack = [] stack.append(start) stack.append(end) while stack: end = stack.pop() start = stack.pop() pivot = partition(list, start, end) if pivot - 1 > start: stack.append(start) stack.append(pivot - 1) if pivot + 1 < end: stack.append(pivot + 1) stack.append(end) return list price_list = list(np.array(json_data['price20'].tolist())) price_list = list(map(int, price_list)) price_list = QuickSort(price_list, 0, len(price_list)-1) price_list = list(map(str, price_list)) minPrice = price_list[0] search_index = json_data[json_data['price20'] == minPrice].index json_data.loc[search_index] maxPrice = price_list[len(price_list)-1] search_index = json_data[json_data['price20'] == maxPrice].index json_data.loc[search_index]
0.257952
0.629917
# `ilastikrag` Quickstart Tutorial ``` %matplotlib inline import matplotlib import pylab pylab.rcParams['figure.figsize'] = (20.0, 20.0) from ilastikrag.util import colorize_labels from collections import OrderedDict import numpy as np import pandas as pd import vigra import ilastikrag ``` ## Load Data ``` # Note: Rag inputs must be VigraArray (with axistags) grayscale = vigra.impex.readImage('grayscale.png', dtype='NATIVE').withAxes('yx') membranes = vigra.impex.readImage('membranes.png', dtype='NATIVE').withAxes('yx') superpixels = vigra.impex.readImage('superpixels.png', dtype=np.uint32).withAxes('yx') vigra.multiImshow(OrderedDict( [('grayscale', (grayscale, 'img')), ('membranes', (membranes, 'img')), ('superpixels', (colorize_labels(superpixels), 'img'))]), (1,3)) ``` ## Create Rag ``` rag = ilastikrag.Rag(superpixels) ``` ## Compute features (grayscale channel) ``` grayscale_features = rag.compute_features(grayscale, ['standard_edge_mean', 'standard_sp_quantiles_50']) grayscale_features[:5] ``` ## Compute features (membrane channel) ``` membrane_features = rag.compute_features(membranes, ['standard_edge_quantiles']) membrane_features[:5] ``` ## Combine feature tables into `ndarray` ``` grayscale_feature_array = grayscale_features.iloc[:,2:].values # drop sp columns membrane_feature_array = membrane_features.iloc[:,2:].values all_features_array = np.concatenate((grayscale_feature_array, membrane_feature_array), axis=1) print all_features_array[:5] ``` ## Show supported feature names ``` for feature_name in rag.supported_features(): print feature_name ``` ## GUI: Ask the user for feature selections Note: `ilastikrag.gui` requires `pyqt4` ``` import ilastikrag.gui from ilastikrag.gui import FeatureSelectionDialog default_selections = { 'grayscale' : ['standard_sp_count', 'standard_sp_quantiles_50', 'edgeregion_edge_regionradii_0'], 'membranes' : ['standard_edge_mean', 'standard_edge_minimum', 'standard_edge_variance'] } user_selections = FeatureSelectionDialog.launch(['grayscale', 'membranes'], rag.supported_features(), default_selections) for channel_name, feature_names in user_selections.items(): print "Selections for '{}':".format(channel_name) for feature_name in feature_names: print " {}".format(feature_name) print "" ```
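If you prefer to keep the superpixel id columns next to the combined feature values instead of a bare `ndarray`, a small sketch using `pd.concat` (assuming the first two columns of each feature table are the sp ids, as in the slicing above) looks like this:

```
# keep the sp1/sp2 edge id columns alongside the combined feature values
edge_ids = grayscale_features.iloc[:, :2]
combined_df = pd.concat([edge_ids,
                         grayscale_features.iloc[:, 2:],
                         membrane_features.iloc[:, 2:]], axis=1)
combined_df[:5]
```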
github_jupyter
%matplotlib inline
import matplotlib
import pylab
pylab.rcParams['figure.figsize'] = (20.0, 20.0)

from ilastikrag.util import colorize_labels

from collections import OrderedDict
import numpy as np
import pandas as pd
import vigra
import ilastikrag

# Note: Rag inputs must be VigraArray (with axistags)
grayscale = vigra.impex.readImage('grayscale.png', dtype='NATIVE').withAxes('yx')
membranes = vigra.impex.readImage('membranes.png', dtype='NATIVE').withAxes('yx')
superpixels = vigra.impex.readImage('superpixels.png', dtype=np.uint32).withAxes('yx')

vigra.multiImshow(OrderedDict( [('grayscale', (grayscale, 'img')),
                                ('membranes', (membranes, 'img')),
                                ('superpixels', (colorize_labels(superpixels), 'img'))]),
                  (1,3))

rag = ilastikrag.Rag(superpixels)

grayscale_features = rag.compute_features(grayscale, ['standard_edge_mean', 'standard_sp_quantiles_50'])
grayscale_features[:5]

membrane_features = rag.compute_features(membranes, ['standard_edge_quantiles'])
membrane_features[:5]

grayscale_feature_array = grayscale_features.iloc[:,2:].values # drop sp columns
membrane_feature_array = membrane_features.iloc[:,2:].values

all_features_array = np.concatenate((grayscale_feature_array, membrane_feature_array), axis=1)
print(all_features_array[:5])

for feature_name in rag.supported_features():
    print(feature_name)

import ilastikrag.gui
from ilastikrag.gui import FeatureSelectionDialog

default_selections = { 'grayscale' : ['standard_sp_count', 'standard_sp_quantiles_50', 'edgeregion_edge_regionradii_0'],
                       'membranes' : ['standard_edge_mean', 'standard_edge_minimum', 'standard_edge_variance'] }

user_selections = FeatureSelectionDialog.launch(['grayscale', 'membranes'],
                                                rag.supported_features(),
                                                default_selections)

for channel_name, feature_names in user_selections.items():
    print("Selections for '{}':".format(channel_name))
    for feature_name in feature_names:
        print(" {}".format(feature_name))
    print("")
0.337968
0.890723
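The feature tables above are combined positionally after dropping the first two columns (the superpixel-id pair, per the `# drop sp columns` comment). A small sketch of an alternative that keeps those id columns and joins on them, so the result does not depend on row order; the column names `sp1`/`sp2` and the toy values are assumptions for illustration, not the library's actual output.

```
import numpy as np
import pandas as pd

# Toy stand-ins for the two feature tables: two superpixel-id columns
# followed by one feature column each.
gray = pd.DataFrame({'sp1': [1, 1], 'sp2': [2, 3], 'edge_mean': [0.4, 0.7]})
memb = pd.DataFrame({'sp1': [1, 1], 'sp2': [2, 3], 'edge_q50': [0.2, 0.9]})

# Join on the id pair, then take only the feature columns as an ndarray.
combined = gray.merge(memb, on=['sp1', 'sp2'])
all_features = combined.drop(columns=['sp1', 'sp2']).to_numpy()
print(all_features)
```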
``` #base libraries import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from timeit import default_timer as timer from pprint import pprint from time import time import copy from pathlib import Path import datetime from sklearn.metrics import mean_squared_log_error from sklearn.ensemble import RandomForestRegressor from lightgbm import LGBMRegressor from tqdm import tqdm_notebook as tqdm from sklearn.metrics import confusion_matrix,classification_report from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split import statsmodels.api as sm #other visualizations import chart_studio.plotly as py import plotly.graph_objs as go import plotly.offline as pyoff from plotly.offline import init_notebook_mode, iplot # Pytorch libs import pytorch_lightning as pl from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor from pytorch_lightning.loggers import TensorBoardLogger import torch from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet, DeepAR, RecurrentNetwork from pytorch_forecasting.data import GroupNormalizer from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters from pytorch_forecasting.data.encoders import NaNLabelEncoder import warnings warnings.filterwarnings('ignore') sns.set() #start importing data data = pd.read_csv('../Databases/final_df.csv', dtype={ 'ProductFamily_ID': np.int16, 'ProductCategory_ID': np.int16, 'ProductBrand_ID': np.int16, 'ProductName_ID': np.int16, 'ProductPackSKU_ID': np.int16, 'Point-of-Sale_ID': np.int16, 'Value_units': np.int64, 'Value_price': np.float32, 'Unit_Price': np.float32, 'Retail_price': np.float32, 'Is_Promo': np.int16, 'cluster_value': np.int16, 'cluster_product': np.int16, }) del data['Unnamed: 0'] data data.describe(include = 'all') #convert date to datetime format data['Date'] = pd.to_datetime(data['Date']) data.info() print('Min date in DF: %s' % data['Date'].min().date()) print('Max date in DF: %s' % data['Date'].max().date()) ``` Daily data is too sparse, we will start by compressing into weekly data. ``` #Aggregate by week grouped_data = data.groupby([pd.Grouper(key='Date', freq='SM'), 'Point-of-Sale_ID', 'ProductName_ID', 'ProductPackSKU_ID']).agg({ 'Value_units': 'sum', }).reset_index().sort_values('Date') #create cluster dict to map later prod_dict = data.set_index("Point-of-Sale_ID")["cluster_product"].to_dict() val_dict = data.set_index("Point-of-Sale_ID")["cluster_value"].to_dict() del data ``` #### We already know that there is a difference between the number of SKUs and the number of Product Names: there are more SKUs than Names. In electronics retail, it is important to remember that, from an SKU standpoint, different products may be considered the same - as in, they are an updated model. We will look into which product_SKU - product-name pairs do not share a 1 to 1 relationship. For any given Product name, we should see a multi-modal distribution throughout time - as in different peaks from different SKUs as time evolves. 
``` #check each product's last date filtered = grouped_data.filter(['Date', 'ProductName_ID', 'Point-of-Sale_ID']) #get indexes of first record of of a product sale and the last record of a product sale first_sales = filtered.sort_values('Date').drop_duplicates(['ProductName_ID'], keep='first').drop('Point-of-Sale_ID', axis = 1) last_sales = filtered.sort_values('Date').drop_duplicates(['ProductName_ID'], keep='last').drop('Point-of-Sale_ID', axis = 1) print(f'Indexes and name of first sales: {first_sales}.') print(f'Indexes and name of last sales: {last_sales}.') #Dealing with very recent products lastdate_treshold = "2018-09-01" dispensable_prods = list(last_sales[last_sales['Date'] < lastdate_treshold]['ProductName_ID']) #getting list of products that have not sold len(dispensable_prods) #Dealing with very recent products firstdate_treshold = "2019-10-15" new_prods = list(first_sales[first_sales['Date'] > firstdate_treshold]['ProductName_ID']) #we have 8 products whose first recorded sale was in the 2 weeks leading up to the end of dataset: #we will monitor these products very closely len(new_prods) #removing all observations of dispensable products that will no longer be sold - we are not working so far with product embeddings grouped_data = grouped_data.loc[~grouped_data['ProductName_ID'].isin(dispensable_prods)] new_pivot = pd.pivot_table(grouped_data, index=['Point-of-Sale_ID', 'ProductName_ID'], columns = 'Date', values = 'Value_units', aggfunc='sum').fillna(0) new_pivot = new_pivot.reset_index() new_pivot melt = new_pivot.melt(id_vars=['Point-of-Sale_ID', 'ProductName_ID'] , var_name='Date', value_name='Value_units') grouped_data = melt[:] del melt del new_pivot #adding additional columns concerning month, dow, year and week number in the year grouped_data["month"] = grouped_data.Date.dt.month.astype(str).astype("category") grouped_data["week_nr"] = grouped_data.Date.dt.isocalendar().week.astype(str).astype("category") # 5 and 6 correspond to Sat and Sun grouped_data["year"] = grouped_data.Date.dt.year.astype(str).astype("category") #also creating log of units sold - summing small value to ensure that value never reaches 0 grouped_data["log_units_sold"] = np.log(grouped_data["Value_units"] + 1e-8).astype(np.float64) #getting other relevant weekly information #weekly avg sales sales(units) grouped_data["avg_sales_of_POS"] = grouped_data.groupby(["Date", "Point-of-Sale_ID"]).Value_units.transform("mean") grouped_data["avg_sales_of_Prod_Name"] = grouped_data.groupby(["Date", "ProductName_ID"]).Value_units.transform("mean") #converting final variables to categoricals for consistency grouped_data["ProductName_ID"] = grouped_data["ProductName_ID"].astype(str).astype("category") grouped_data["Point-of-Sale_ID"] = grouped_data["Point-of-Sale_ID"].astype(str).astype("category") #check everything again grouped_data #work on a chunk - data from 2018 and 2019 to expedite training grouped_data_transformer = grouped_data.loc[(grouped_data["Date"] >= datetime.datetime(2018,10,1))] grouped_data = grouped_data grouped_data = grouped_data_transformer #create series of dates #create weekly index of dates between start and end-date and index it date_series = grouped_data["Date"].drop_duplicates().sort_values() date_series.index = np.arange(1, len(date_series) + 1) #we now merge dfs on dates to_merge = date_series.to_frame().reset_index() #merge on dates grouped_data = grouped_data.merge(to_merge, how = 'inner') grouped_data.rename(columns = {'index': "time_idx"}, inplace = True) grouped_data.info() 
```

## Modeling stage

We will now design different candidate models to perform our time series forecast, using the PyTorch Forecasting library:

1. The predicted sales become the last known value - the benchmarking model.
2. A deep learning model.

As measures of prediction accuracy, we want to minimize the difference between predictions and targets. The following are valid metrics for measuring model accuracy:

- MAPE (Mean Absolute Percentage Error)
- MAE (Mean Absolute Error)
- MSE (Mean Squared Error)

The implementation of the PyTorch Forecasting models is inspired by the PyTorch Forecasting tutorial with the Stallion dataset: https://pytorch-forecasting.readthedocs.io/en/latest/tutorials/stallion.html

```
#Creating Dataset as timeseries Dataset: https://pytorch-forecasting.readthedocs.io/en/latest/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html#pytorch_forecasting.data.timeseries.TimeSeriesDataSet
# create the dataset from the pandas dataframe
max_prediction_length = 3 #objective is to predict 6 weeks into the future
max_encoder_length = 12 #take previous 12 weeks (roughly 3 months of sales)
context_length = max_encoder_length
prediction_length = max_prediction_length

training_cutoff = grouped_data["time_idx"].max() - max_prediction_length

training = TimeSeriesDataSet(
    grouped_data[lambda x: x.time_idx <= training_cutoff],
    time_idx="time_idx",
    target="Value_units",
    group_ids=["Point-of-Sale_ID", "ProductName_ID"],
    # only unknown variable is "value" - and N-Beats can also not take any additional variables
    time_varying_unknown_reals=["Value_units"],
    max_encoder_length=context_length,
    max_prediction_length=prediction_length,
)

validation = TimeSeriesDataSet.from_dataset(training, grouped_data, min_prediction_idx=training_cutoff + 1)

# create dataloaders for model - batch training that does not burn a graphics card
batch_size = 128  # set this between 32 to 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers= 2 )
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers= 4)
```

## Baseline model

Setting up a base model - the prediction is the last known sale.

```
# calculate baseline mean absolute error, i.e. predict next value as the last available value from the history
actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])
baseline_predictions = Baseline().predict(val_dataloader)
(actuals - baseline_predictions).abs().mean().item()

SMAPE()(baseline_predictions, actuals)

raw_predictions, x = Baseline().predict(val_dataloader, mode="raw", return_x=True)
for idx in range(10):  # plot 10 examples
    Baseline().plot_prediction(x, raw_predictions, idx=idx, add_loss_to_title=True);
```

## Autoregressive model

Setting up a DeepAR model on the same dataset.

```
dars = DeepAR.from_dataset(
    training,
    learning_rate=4e-3,
    log_interval=10,
    log_val_interval=1,
    weight_decay=1e-2,
)

# Lightning trainer used to fit the network
trainer = pl.Trainer(max_epochs=5, gradient_clip_val=0.1)
trainer.fit(
    dars,
    train_dataloader=train_dataloader,
    val_dataloaders=val_dataloader,
)

print(f"Number of parameters in network: {dars.size()/1e3:.1f}k")

# calculate mean absolute error on validation set
actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])
deep_ar_predictions = dars.predict(val_dataloader)
(actuals - deep_ar_predictions).abs().mean()

SMAPE()(deep_ar_predictions, actuals)

raw_predictions, x = dars.predict(val_dataloader, mode="raw", return_x=True)
raw_predictions
x
```
github_jupyter
#base libraries import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from timeit import default_timer as timer from pprint import pprint from time import time import copy from pathlib import Path import datetime from sklearn.metrics import mean_squared_log_error from sklearn.ensemble import RandomForestRegressor from lightgbm import LGBMRegressor from tqdm import tqdm_notebook as tqdm from sklearn.metrics import confusion_matrix,classification_report from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split import statsmodels.api as sm #other visualizations import chart_studio.plotly as py import plotly.graph_objs as go import plotly.offline as pyoff from plotly.offline import init_notebook_mode, iplot # Pytorch libs import pytorch_lightning as pl from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor from pytorch_lightning.loggers import TensorBoardLogger import torch from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet, DeepAR, RecurrentNetwork from pytorch_forecasting.data import GroupNormalizer from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters from pytorch_forecasting.data.encoders import NaNLabelEncoder import warnings warnings.filterwarnings('ignore') sns.set() #start importing data data = pd.read_csv('../Databases/final_df.csv', dtype={ 'ProductFamily_ID': np.int16, 'ProductCategory_ID': np.int16, 'ProductBrand_ID': np.int16, 'ProductName_ID': np.int16, 'ProductPackSKU_ID': np.int16, 'Point-of-Sale_ID': np.int16, 'Value_units': np.int64, 'Value_price': np.float32, 'Unit_Price': np.float32, 'Retail_price': np.float32, 'Is_Promo': np.int16, 'cluster_value': np.int16, 'cluster_product': np.int16, }) del data['Unnamed: 0'] data data.describe(include = 'all') #convert date to datetime format data['Date'] = pd.to_datetime(data['Date']) data.info() print('Min date in DF: %s' % data['Date'].min().date()) print('Max date in DF: %s' % data['Date'].max().date()) #Aggregate by week grouped_data = data.groupby([pd.Grouper(key='Date', freq='SM'), 'Point-of-Sale_ID', 'ProductName_ID', 'ProductPackSKU_ID']).agg({ 'Value_units': 'sum', }).reset_index().sort_values('Date') #create cluster dict to map later prod_dict = data.set_index("Point-of-Sale_ID")["cluster_product"].to_dict() val_dict = data.set_index("Point-of-Sale_ID")["cluster_value"].to_dict() del data #check each product's last date filtered = grouped_data.filter(['Date', 'ProductName_ID', 'Point-of-Sale_ID']) #get indexes of first record of of a product sale and the last record of a product sale first_sales = filtered.sort_values('Date').drop_duplicates(['ProductName_ID'], keep='first').drop('Point-of-Sale_ID', axis = 1) last_sales = filtered.sort_values('Date').drop_duplicates(['ProductName_ID'], keep='last').drop('Point-of-Sale_ID', axis = 1) print(f'Indexes and name of first sales: {first_sales}.') print(f'Indexes and name of last sales: {last_sales}.') #Dealing with very recent products lastdate_treshold = "2018-09-01" dispensable_prods = list(last_sales[last_sales['Date'] < lastdate_treshold]['ProductName_ID']) #getting list of products that have not sold len(dispensable_prods) #Dealing with very recent products firstdate_treshold = "2019-10-15" new_prods = list(first_sales[first_sales['Date'] > firstdate_treshold]['ProductName_ID']) #we have 8 
products whose first recorded sale was in the 2 weeks leading up to the end of dataset: #we will monitor these products very closely len(new_prods) #removing all observations of dispensable products that will no longer be sold - we are not working so far with product embeddings grouped_data = grouped_data.loc[~grouped_data['ProductName_ID'].isin(dispensable_prods)] new_pivot = pd.pivot_table(grouped_data, index=['Point-of-Sale_ID', 'ProductName_ID'], columns = 'Date', values = 'Value_units', aggfunc='sum').fillna(0) new_pivot = new_pivot.reset_index() new_pivot melt = new_pivot.melt(id_vars=['Point-of-Sale_ID', 'ProductName_ID'] , var_name='Date', value_name='Value_units') grouped_data = melt[:] del melt del new_pivot #adding additional columns concerning month, dow, year and week number in the year grouped_data["month"] = grouped_data.Date.dt.month.astype(str).astype("category") grouped_data["week_nr"] = grouped_data.Date.dt.isocalendar().week.astype(str).astype("category") # 5 and 6 correspond to Sat and Sun grouped_data["year"] = grouped_data.Date.dt.year.astype(str).astype("category") #also creating log of units sold - summing small value to ensure that value never reaches 0 grouped_data["log_units_sold"] = np.log(grouped_data["Value_units"] + 1e-8).astype(np.float64) #getting other relevant weekly information #weekly avg sales sales(units) grouped_data["avg_sales_of_POS"] = grouped_data.groupby(["Date", "Point-of-Sale_ID"]).Value_units.transform("mean") grouped_data["avg_sales_of_Prod_Name"] = grouped_data.groupby(["Date", "ProductName_ID"]).Value_units.transform("mean") #converting final variables to categoricals for consistency grouped_data["ProductName_ID"] = grouped_data["ProductName_ID"].astype(str).astype("category") grouped_data["Point-of-Sale_ID"] = grouped_data["Point-of-Sale_ID"].astype(str).astype("category") #check everything again grouped_data #work on a chunk - data from 2018 and 2019 to expedite training grouped_data_transformer = grouped_data.loc[(grouped_data["Date"] >= datetime.datetime(2018,10,1))] grouped_data = grouped_data grouped_data = grouped_data_transformer #create series of dates #create weekly index of dates between start and end-date and index it date_series = grouped_data["Date"].drop_duplicates().sort_values() date_series.index = np.arange(1, len(date_series) + 1) #we now merge dfs on dates to_merge = date_series.to_frame().reset_index() #merge on dates grouped_data = grouped_data.merge(to_merge, how = 'inner') grouped_data.rename(columns = {'index': "time_idx"}, inplace = True) grouped_data.info() #Creating Dataset as timeseries Dataset: https://pytorch-forecasting.readthedocs.io/en/latest/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html#pytorch_forecasting.data.timeseries.TimeSeriesDataSet # create the dataset from the pandas dataframe max_prediction_length = 3 #objective is to predict 6 weeks into the future max_encoder_length = 12 #take previous 12 weeks (roughly 3 months of sales) context_length = max_encoder_length prediction_length = max_prediction_length training_cutoff = grouped_data["time_idx"].max() - max_prediction_length training = TimeSeriesDataSet( grouped_data[lambda x: x.time_idx <= training_cutoff], time_idx="time_idx", target="Value_units", group_ids=["Point-of-Sale_ID", "ProductName_ID"], # only unknown variable is "value" - and N-Beats can also not take any additional variables time_varying_unknown_reals=["Value_units"], max_encoder_length=context_length, max_prediction_length=prediction_length, ) validation = 
TimeSeriesDataSet.from_dataset(training, grouped_data, min_prediction_idx=training_cutoff + 1) # create dataloaders for model - batch training that does not burn a graphics card batch_size = 128 # set this between 32 to 128 train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers= 2 ) val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers= 4) # calculate baseline mean absolute error, i.e. predict next value as the last available value from the history actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)]) baseline_predictions = Baseline().predict(val_dataloader) (actuals - baseline_predictions).abs().mean().item() SMAPE()(baseline_predictions, actuals) raw_predictions, x = Baseline().predict(val_dataloader, mode="raw", return_x=True) for idx in range(10): # plot 10 examples Baseline().plot_prediction(x, raw_predictions, idx=idx, add_loss_to_title=True); dars = DeepAR.from_dataset( training, learning_rate=4e-3, log_interval=10, log_val_interval=1, weight_decay=1e-2, ) trainer.fit( dars, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, ) print(f"Number of parameters in network: {dars.size()/1e3:.1f}k") # calculate mean absolute error on validation set actuals = torch.cat([y[0] for x, y in iter(val_dataloader)]) deep_ar_predictions = dars.predict(val_dataloader) (actuals - deep_ar_predictions).abs().mean() SMAPE()(deep_ar_predictions, actuals) raw_predictions, x = dars.predict(val_dataloader, mode="raw", return_x=True) raw_predictions x
0.395251
0.689588
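The baseline in the notebook above predicts the next value as the last available value and is scored with MAE and SMAPE. A self-contained sketch of that naive benchmark on a toy series, with both metrics computed by hand; all numbers are made up for illustration.

```
import numpy as np

# Toy history and future for one product/store series.
history = np.array([12., 15., 11., 14., 13.])
actual_future = np.array([16., 12., 14.])

# Naive benchmark: repeat the last observed value over the horizon.
naive_forecast = np.full_like(actual_future, history[-1])

mae = np.mean(np.abs(actual_future - naive_forecast))
smape = np.mean(2 * np.abs(actual_future - naive_forecast)
                / (np.abs(actual_future) + np.abs(naive_forecast)))
print(f"MAE = {mae:.2f}, SMAPE = {smape:.3f}")
```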
``` # Quick utility to embed the videos below from IPython.display import YouTubeVideo def embed_video(index, playlist='PLYCpMb24GpOC704uO9svUrihl-HY1tTJJ'): return YouTubeVideo('', index=index - 1, list=playlist, width=600, height=350) embed_video(1) URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD' import os import pandas as pd from urllib.request import urlretrieve #urlretrieve(URL, 'Freemont.csv') def get_freemont_data(filename = 'Freemont.csv', url = URL, force_download = False): if force_download or not os.path.exists(filename): urlretrieve(url, filename) data = pd.read_csv('Freemont.csv', index_col='Date', parse_dates=True) data.columns = ['West', 'East'] data['Total'] = data['West'] + data['East'] return data data = get_freemont_data() !head Freemont.csv import pandas as pd data = pd.read_csv('Freemont.csv', index_col='Date', parse_dates=True) data.head() %matplotlib inline data.resample('W').sum().plot() embed_video(2) import matplotlib.pyplot as plt plt.style.use('seaborn') data.columns = ['West', 'East'] data.resample('W').sum().plot() data['Total'] = data['West'] + data['East'] ax = data.resample('D').sum().rolling(365).sum().plot() ax.set_ylim(0, None); data.groupby(data.index.time).mean().plot(); pivoted = data.pivot_table('Total', index = data.index.time, columns = data.index.date) pivoted.iloc[:5, :5] pivoted.plot(legend = False, alpha = 0.01) embed_video(3) ``` kernel > Restart & Run All (to see that all works in sequence...) then, when all works... move it into Github Create a new repository and copy the download link for terminal ----- JohansComputer:~ jfr$ git clone https://github.com/johanfrisk/reproducible_data_analysis.git Cloning into 'reproducible_data_analysis'... remote: Counting objects: 5, done. remote: Compressing objects: 100% (5/5), done. remote: Total 5 (delta 0), reused 0 (delta 0), pack-reused 0 Unpacking objects: 100% (5/5), done. Checking connectivity... done. JohansComputer:~ jfr$ pwd /Users/jfr JohansComputer:~ jfr$ mv Desktop/Reproducible_data_analysis.ipynb reproducible_data_analysis JohansComputer:~ jfr$ cd repr* JohansComputer:reproducible_data_analysis jfr$ git status On branch master Your branch is up-to-date with 'origin/master'. Untracked files: (use "git add <file>..." to include in what will be committed) Reproducible_data_analysis.ipynb nothing added to commit but untracked files present (use "git add" to track) JohansComputer:reproducible_data_analysis jfr$ git add Reproducible_data_analysis.ipynb JohansComputer:reproducible_data_analysis jfr$ git commit -m "Add initial analysis notebook" [master 3e5c89a] Add initial analysis notebook 1 file changed, 577 insertions(+) create mode 100644 Reproducible_data_analysis.ipynb JohansComputer:reproducible_data_analysis jfr$ git push origin master Counting objects: 3, done. Delta compression using up to 8 threads. Compressing objects: 100% (3/3), done. Writing objects: 100% (3/3), 241.34 KiB | 0 bytes/s, done. Total 3 (delta 0), reused 0 (delta 0) To https://github.com/johanfrisk/reproducible_data_analysis.git 21dfa38..3e5c89a master -> master JohansComputer:reproducible_data_analysis jfr$ JohansComputer:reproducible_data_analysis jfr$ git status On branch master Your branch is up-to-date with 'origin/master'. nothing to commit, working directory clean JohansComputer:reproducible_data_analysis jfr$ ``` embed_video(4) ``` add the database 'Freemont.csv' to the gitignore file... 
so that we don't back up large files by accident. At the end of the .gitignore file, add:

    #data
    Freemont.csv
github_jupyter
# Quick utility to embed the videos below from IPython.display import YouTubeVideo def embed_video(index, playlist='PLYCpMb24GpOC704uO9svUrihl-HY1tTJJ'): return YouTubeVideo('', index=index - 1, list=playlist, width=600, height=350) embed_video(1) URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD' import os import pandas as pd from urllib.request import urlretrieve #urlretrieve(URL, 'Freemont.csv') def get_freemont_data(filename = 'Freemont.csv', url = URL, force_download = False): if force_download or not os.path.exists(filename): urlretrieve(url, filename) data = pd.read_csv('Freemont.csv', index_col='Date', parse_dates=True) data.columns = ['West', 'East'] data['Total'] = data['West'] + data['East'] return data data = get_freemont_data() !head Freemont.csv import pandas as pd data = pd.read_csv('Freemont.csv', index_col='Date', parse_dates=True) data.head() %matplotlib inline data.resample('W').sum().plot() embed_video(2) import matplotlib.pyplot as plt plt.style.use('seaborn') data.columns = ['West', 'East'] data.resample('W').sum().plot() data['Total'] = data['West'] + data['East'] ax = data.resample('D').sum().rolling(365).sum().plot() ax.set_ylim(0, None); data.groupby(data.index.time).mean().plot(); pivoted = data.pivot_table('Total', index = data.index.time, columns = data.index.date) pivoted.iloc[:5, :5] pivoted.plot(legend = False, alpha = 0.01) embed_video(3) embed_video(4)
0.474144
0.576512
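The notebook above wraps `urlretrieve` in a helper so the CSV is downloaded only once. A generic sketch of that download-and-cache pattern, written so the `filename` argument is used consistently for both the download target and the read; the helper name is invented, not the notebook's.

```
import os
from urllib.request import urlretrieve

import pandas as pd

def load_csv_cached(url, filename, force_download=False):
    # Download only when the local copy is missing or a refresh is forced.
    if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)
    # Parse with a DatetimeIndex so resampling and pivoting work directly.
    return pd.read_csv(filename, index_col='Date', parse_dates=True)

# Example: data = load_csv_cached(URL, 'Freemont.csv')
```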
# Add phrase and clause nodes The data for clauses and phrases comes from csv files prepared by Martijn Naaijer. We compile interpret the CSV and compile it into input data for the [modify](https://annotation.github.io/text-fabric/compose/modify.html) function. Adding phrases and clauses is just a single call to that modify function, which turns DSS version 0.8 into 0.9. ``` %load_ext autoreload %autoreload 2 import pprint as pp from tf.app import use from addBoundariesFromNaaijer import readBoundariesPlain PP = pp.PrettyPrinter(indent=2) def pprint(x): PP.pprint(x) A = use("dss:hot", checkout="clone", hoist=globals(), version="0.8") ``` # Check We check whether the ISA clause/word boundary file mentions the correct word nodes. First a visual inspection of the first 10 words. ``` F.otype.v(firstWord) # noqa F821 T.formats FMT = "text-trans-full" firstWord = 1894861 for w in range(firstWord, firstWord + 10): rep = T.text(w, fmt=FMT) print(f"{w} = {rep}") ``` The data file reads this ``` id,scroll,book,verse,word,clause_nr,phrase_nr 1894861,1Qisaa,Isaiah,1,XZWN,1,1 1894862,1Qisaa,Isaiah,1,JC<JHW,1,1 1894863,1Qisaa,Isaiah,1,BN,1,1 1894864,1Qisaa,Isaiah,1,>MWY,1,1 1894865,1Qisaa,Isaiah,1,>CR,2,2 1894866,1Qisaa,Isaiah,1,XZH,2,3 1894867,1Qisaa,Isaiah,1,<L,2,4 1894868,1Qisaa,Isaiah,1,JHWDH,2,4 1894869,1Qisaa,Isaiah,1,W,2,4 1894870,1Qisaa,Isaiah,1,JRWCLM,2,4 ``` We are going to check whether: 1. the word nodes (first column) increase by one for each subsequent line 2. the word in the file equals the word according to TF, modulo a small transformation: * C in the file stands for # * we ignore case differences (relevant in the last letter) First a visual check. ``` data = readBoundariesPlain() pprint(list(data.keys())[0:10]) pprint(list(data.values())[0:10]) T.text(1894879, fmt="text-trans-full") END = "00 " prevW = None good = True for (w, (word, book, clNr, phrNr)) in data.items(): wordTrans = word.replace("C", "#").replace("F", "#") wordTf = T.text(w, fmt=FMT).rstrip().upper().replace("'", "") if wordTrans != wordTf: print(f"irregularity at {w}: `{wordTrans}` != `{wordTf}`") good = False break if prevW is not None and w != prevW + 1: if not (prevW + 2 == w and T.text(prevW + 1, fmt=FMT) == END): print(f"irregularity at {w} following {prevW}") good = False break prevW = w if good: print("all is well") ``` # Run We can now run the script to produce a new DSS dataset with extra node types: `clause` and `phrase`, both with feature `nr`. **NB**: The script specifies the source version and the destination version for the new TF dataset. We can run it on the commandline, or right here, in the notebook. ``` !python3 addBoundariesFromNaaijer.py ``` # Test Let's see whether the first word is now contained in a phrase and in a clause: ``` A = use("dss:clone", checkout="clone", hoist=globals(), version="0.9") L.u(firstWord) for n in L.u(firstWord): print(n, F.otype.v(n)) ``` This is a fragment of the data file: ``` 1894900,1Qisaa,Isaiah,3,QWNJHW,11,24 1894901,1Qisaa,Isaiah,3,W,12,25 1894902,1Qisaa,Isaiah,3,XMWR,12,26 1894903,1Qisaa,Isaiah,3,>BWS,12,27 1894904,1Qisaa,Isaiah,3,B<LJW,12,27 1894905,1Qisaa,Isaiah,3,JFR>L,13,28 ``` We gather the words belonging to clause 12: ``` c = F.otype.s("clause")[11] print(F.nr.v(c)) print(L.d(c, otype="word")) ``` Correct! We gather the words belonging to phrase 27: ``` p = F.otype.s("phrase")[26] print(F.nr.v(p)) print(L.d(p, otype="word")) ``` Correct!
github_jupyter
%load_ext autoreload %autoreload 2 import pprint as pp from tf.app import use from addBoundariesFromNaaijer import readBoundariesPlain PP = pp.PrettyPrinter(indent=2) def pprint(x): PP.pprint(x) A = use("dss:hot", checkout="clone", hoist=globals(), version="0.8") F.otype.v(firstWord) # noqa F821 T.formats FMT = "text-trans-full" firstWord = 1894861 for w in range(firstWord, firstWord + 10): rep = T.text(w, fmt=FMT) print(f"{w} = {rep}") id,scroll,book,verse,word,clause_nr,phrase_nr 1894861,1Qisaa,Isaiah,1,XZWN,1,1 1894862,1Qisaa,Isaiah,1,JC<JHW,1,1 1894863,1Qisaa,Isaiah,1,BN,1,1 1894864,1Qisaa,Isaiah,1,>MWY,1,1 1894865,1Qisaa,Isaiah,1,>CR,2,2 1894866,1Qisaa,Isaiah,1,XZH,2,3 1894867,1Qisaa,Isaiah,1,<L,2,4 1894868,1Qisaa,Isaiah,1,JHWDH,2,4 1894869,1Qisaa,Isaiah,1,W,2,4 1894870,1Qisaa,Isaiah,1,JRWCLM,2,4 data = readBoundariesPlain() pprint(list(data.keys())[0:10]) pprint(list(data.values())[0:10]) T.text(1894879, fmt="text-trans-full") END = "00 " prevW = None good = True for (w, (word, book, clNr, phrNr)) in data.items(): wordTrans = word.replace("C", "#").replace("F", "#") wordTf = T.text(w, fmt=FMT).rstrip().upper().replace("'", "") if wordTrans != wordTf: print(f"irregularity at {w}: `{wordTrans}` != `{wordTf}`") good = False break if prevW is not None and w != prevW + 1: if not (prevW + 2 == w and T.text(prevW + 1, fmt=FMT) == END): print(f"irregularity at {w} following {prevW}") good = False break prevW = w if good: print("all is well") !python3 addBoundariesFromNaaijer.py A = use("dss:clone", checkout="clone", hoist=globals(), version="0.9") L.u(firstWord) for n in L.u(firstWord): print(n, F.otype.v(n)) 1894900,1Qisaa,Isaiah,3,QWNJHW,11,24 1894901,1Qisaa,Isaiah,3,W,12,25 1894902,1Qisaa,Isaiah,3,XMWR,12,26 1894903,1Qisaa,Isaiah,3,>BWS,12,27 1894904,1Qisaa,Isaiah,3,B<LJW,12,27 1894905,1Qisaa,Isaiah,3,JFR>L,13,28 c = F.otype.s("clause")[11] print(F.nr.v(c)) print(L.d(c, otype="word")) p = F.otype.s("phrase")[26] print(F.nr.v(p)) print(L.d(p, otype="word"))
0.231962
0.85318
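The boundary file above assigns each word node a clause and phrase number; to turn that into clause nodes, one needs, for every clause, the span of word nodes it covers. A small pandas sketch of that grouping, using the CSV fragment quoted in the notebook (only the first few Isaiah rows are inlined so it runs standalone); this is an illustration of the idea, not the actual `addBoundariesFromNaaijer` logic.

```
import io
import pandas as pd

csv_text = """id,scroll,book,verse,word,clause_nr,phrase_nr
1894861,1Qisaa,Isaiah,1,XZWN,1,1
1894862,1Qisaa,Isaiah,1,JC<JHW,1,1
1894863,1Qisaa,Isaiah,1,BN,1,1
1894864,1Qisaa,Isaiah,1,>MWY,1,1
1894865,1Qisaa,Isaiah,1,>CR,2,2
1894866,1Qisaa,Isaiah,1,XZH,2,3
"""
df = pd.read_csv(io.StringIO(csv_text))

# First/last word node and word count per clause -- the raw material
# for building clause nodes with their word slots.
print(df.groupby('clause_nr')['id'].agg(['min', 'max', 'count']))
```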
``` def warn(*args, **kwargs): pass import warnings warnings.warn = warn # to remove warnings import pandas as pd, numpy as np, matplotlib.pyplot as plt % matplotlib notebook original_data = pd.read_csv ('train.csv') final_test_set = pd.read_csv ('test.csv') ''' The target values aren't given (they're on the Kaggle Website), so the final_test_set is not used to check for overfitting. ''' original_data.head() original_data.shape description = original_data.describe() description.round (2) def create_dict_of_var_and_description (file_name): ''' This is used to create a dictionary from the Kaggle document that describes all the variables ''' open_file = open(file_name) file = open_file.read() open_file.close() dict = {} for var_descrip in file.split('\n'): var, description = var_descrip.split(',', 1) dict[var] = description return dict var_file_name = 'Var_name_and_description' dict_of_var = create_dict_of_var_and_description (var_file_name) dict_of_var['v2a1'] # example def find_columns_with_string_values (df): ''' This prints the variable that contains strings (it can contain strings and numbers), the description, and the unique values. ''' dtypes_of_data = pd.DataFrame (df.dtypes) list_of_str_var = dtypes_of_data [dtypes_of_data == object].dropna().index for var in list_of_str_var[1:]: # ignore the first column 'Id' because that was not in the Kaggle Variable File print (var, ':', dict_of_var[var], '\n', df[var].unique()) find_columns_with_string_values (original_data) from sklearn.model_selection import train_test_split train, test = train_test_split (original_data, test_size=0.2, random_state=42) X, y = train.drop('Target', axis=1), train['Target'].copy() X_test, y_test = test.drop('Target', axis=1), test['Target'].copy() def deal_with_string_values (X): ''' For example, the columns that contain no and yes are dependency, edjefe, edjefa. For edjefe and edjefa, yes means 1, and no means 0. For dependency, We assume that when people answered the survey, no means 0 and yes means the average. ''' replaced_strings_df = X.replace ('no', 0).replace ('yes', np.nan).drop (['Id', 'idhogar'], axis=1) replaced_strings_df = replaced_strings_df.fillna ({'edjefe': 1, 'edjefa': 1}) from sklearn.preprocessing import Imputer imputer = Imputer(strategy='median') replace_NA_with_median = imputer.fit_transform (replaced_strings_df) return pd.DataFrame (replace_NA_with_median, columns = replaced_strings_df.columns) def drop_variables_with_low_corr_with_target (X, original_data, threshold): ''' If the correlation of a certain variable with the target is less than the threshold, we drop the variables from the dataframe''' train = original_data.copy() corr_matrix = train.corr() corr_target = corr_matrix['Target'].sort_values(ascending=False) over_threshold_var = corr_target[corr_target.abs() < threshold].index return X.drop (over_threshold_var, axis=1), over_threshold_var def Clean_Data (X, original_data, threshold): ''' Combines the above two functions, and returns a dataframe and the dropped variables (so we can drop the same variables during the test set, and the final test set)''' replace_string_transform = deal_with_string_values (X) drop_low_corr, dropped_var = drop_variables_with_low_corr_with_target (replace_string_transform, original_data, threshold) return drop_low_corr, dropped_var ''' We use standard scaler to account for different scales, and imputer to save the median. 
As a result, we could transform the test data using the median, variance, and mean from the train set, and not that of the test set. ''' from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer, StandardScaler trans_pipeline = Pipeline([ ('imputer', Imputer(strategy='median')), ('std_scaler', StandardScaler()) ]) X_Cleaned, Dropped_Var = Clean_Data (X, original_data, .1) X_Cleaned_and_transformed = pd.DataFrame (trans_pipeline.fit_transform (X_Cleaned), columns = X_Cleaned.columns) X_Cleaned_and_transformed.head() def display_var_with_missing_values (description): var = description.copy() count = description.loc['count'] with pd.option_context('display.max_rows', None, 'display.max_columns', 3): return pd.DataFrame ((count [count != 9557.0])) def get_description_of_values_of_var_with_missing_values(description, file_name): df_of_var_with_missing_values = display_var_with_missing_values (description) list_of_var = list (display_var_with_missing_values (description).index) dict_of_var = create_dict_of_var_and_description (file_name) for var in list_of_var: print (var, ':', dict_of_var[var]) df_of_var_with_missing_values = display_var_with_missing_values (description) df_of_var_with_missing_values X_clean_descr = X_Cleaned.describe() X_clean_descr.round(4) ''' Now that the data is cleaned, we move onto training models. We will use simple models: - stochastic gradient descent for regression. - support vector machine (both classifier and regression (linear and polynomial)). - decision trees (both classifier and regression). Note that we could train both classifications and regressions because the target values are household poverty levels (output values are 1, 2, 3, 4). To train the models, we need to find the right parameters for each model and make sure the models do not overfit the data. Therefore, we use cross validation grid search. Finding the right parameters will require trial and error. In addition, the randomness of the models may produce different parameters if the kernel is reset. After we train the models, we will use the ensemble method to combine all the models. But before we do that, we have to make sure that the errors that each model makes are uncorrelated with one another, elsewise models will make the same mistake, allowing majorities for the wrong class. 
''' print ('') from sklearn.model_selection import GridSearchCV # X_Cleaned_and_transformed for X def find_best_param_from_Grid_Search (model, param_grid, scoring, X, y): grid_search = GridSearchCV(model, param_grid, cv=5, scoring=scoring) grid_search.fit(X, y ) return grid_search.best_params_ # scoring types f1 = 'f1_macro' # this is the scoring type of the Kaggle Competition (only for classifiers) mse = 'neg_mean_squared_error' from sklearn.linear_model import SGDRegressor ''' l2: Ridge Regression (reduce overfitting) alpha: parameter to tune for l2 eta0: Learning rate n_iter: number of times the model runs to train ''' sgd_reg = SGDRegressor(penalty='l2') sgd_param_grid = [ {'alpha': [.00020, .00025, .00030 ], 'n_iter': [100, 150, 200], 'eta0': [.001, .005, .01] } ] sgd_best_param = find_best_param_from_Grid_Search (sgd_reg, sgd_param_grid, mse, X_Cleaned_and_transformed, y) sgd_best_param sgd_reg = SGDRegressor(penalty='l2', n_iter=200 , eta0= .005, alpha= 0.0002) sgd_reg.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import LinearSVR # linear support vector regression svm_reg = LinearSVR() svm_param_grid = [ {'C': [.5, 1, 1.5], 'epsilon': [.1, .15, .2]} ] svm_best_param = find_best_param_from_Grid_Search (svm_reg, svm_param_grid, mse, X_Cleaned_and_transformed, y) svm_best_param svm_reg = LinearSVR(C=.5, epsilon=.2) svm_reg.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import LinearSVC svc_clf = LinearSVC(loss='hinge') svc_param_grid = [ {'C': [.5, 1, 1.5]} ] svc_best_param = find_best_param_from_Grid_Search (svc_clf, svc_param_grid, f1, X_Cleaned_and_transformed, y) svc_best_param svc_clf = LinearSVC(loss='hinge', C=1) svc_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import SVC svc_poly_clf = SVC(kernel='rbf') # polynomial classfier with radial basis function svc_poly_param_grid = [ {'C': [.5, 1, 1.5], 'gamma': [10, 20, 30]} ] svc_best_param = find_best_param_from_Grid_Search (svc_poly_clf, svc_poly_param_grid, f1, X_Cleaned_and_transformed, y) svc_best_param svc_poly_clf = SVC(kernel='rbf', C=1.5, gamma=10) svc_poly_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.tree import DecisionTreeClassifier tree_clf = DecisionTreeClassifier() tree_param_grid = [{ 'max_depth': [20, 25, 30], 'min_samples_leaf': [35, 40, 45], 'max_leaf_nodes': [40, 45, 50], 'max_features': [25, 30, 35] }] tree_clf_best_param = find_best_param_from_Grid_Search (tree_clf, tree_param_grid, f1, X_Cleaned_and_transformed, y) tree_clf_best_param tree_clf = DecisionTreeClassifier(max_depth = 30, max_features = 35, max_leaf_nodes=50, min_samples_leaf=35) tree_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor() tree_reg_param_grid = [{ 'max_depth': [35, 40, 45], 'min_samples_leaf': [25, 30, 35], 'max_leaf_nodes': [60, 65, 70], 'max_features': [20, 25, 30] }] tree_reg_best_param = find_best_param_from_Grid_Search (tree_reg, tree_reg_param_grid, mse, X_Cleaned_and_transformed, y) tree_reg_best_param tree_reg = DecisionTreeRegressor(max_depth = 35, max_features = 30, max_leaf_nodes=65, min_samples_leaf=25) tree_reg.fit (X_Cleaned_and_transformed, y ) # Now to create an ensemble. 
models = [sgd_reg, svm_reg, svc_clf, svc_poly_clf, tree_clf, tree_reg] model_names = ['sgd_reg', 'svm_reg', 'svc_clf', 'svc_poly_clf', 'tree_clf', 'tree_reg'] some_data = X_test.copy() labels = y_test.copy() def hard_vote_ensemble_clf (models, model_names, some_data): prediction_df = pd.DataFrame({'FILLER' : [np.nan]}) for i, model in enumerate( models): model_prediction = pd.DataFrame (model.predict(some_data), columns=[model_names[i]]) prediction_df = pd.concat([prediction_df, model_prediction], axis=1) prediction_df.drop ('FILLER', axis=1, inplace=True) # Round the regressors (and if greater than 5, set the value to 4). prediction_df['Target'] = prediction_df.mean(axis=1).round() prediction_df[prediction_df > 4] = 4 # get only the average (result of the ensemble), and the entire dataframe of predictions return prediction_df['Target'], prediction_df.round() ensemble, entire_ensemble = hard_vote_ensemble_clf (models, model_names, X_Cleaned_and_transformed) entire_ensemble.head() residual = entire_ensemble.sub(y.reset_index().drop('index', axis=1).squeeze(), axis=0) residual.corr() # None of the correlations are greater than .9, so we do not need to drop any of the models. from sklearn.metrics import confusion_matrix conf_mx = confusion_matrix(ensemble, y) # each row is the actual class, each column is the predicted class print (conf_mx) ''' Now we use the ensemble classifier to predict the test set. ''' def clean_predict (some_data, Dropped_Var): replaced_strings_df = some_data.replace ('no', 0).replace ('yes', np.nan).drop (['Id', 'idhogar'], axis=1) replaced_strings_df = replaced_strings_df.fillna ({'edjefe': 1, 'edjefa': 1}) dropped = replaced_strings_df.drop (Dropped_Var, axis=1) some_data_prepared = trans_pipeline.transform(dropped) df = pd.DataFrame (some_data_prepared, columns = dropped.columns) return df def hard_vote_ensemble_clf_test (models, model_names, some_data, Dropped_Var): df = clean_predict (some_data, Dropped_Var) ensemble, entire_ensemble = hard_vote_ensemble_clf (models, model_names, df) return ensemble ensemble_test = hard_vote_ensemble_clf_test (models, model_names, some_data, Dropped_Var ) conf_mx_test = confusion_matrix (ensemble_test, labels) conf_mx_test ''' Now to predict the final set without target labels ''' final_ensemble_predict = hard_vote_ensemble_clf_test (models, model_names, final_test_set, Dropped_Var ) final_predictions_df = pd.DataFrame ({ 'Id': final_test_set['Id'], 'Target': final_ensemble_predict }) final_predictions_df = final_predictions_df.set_index ('Id') final_predictions_df.head() final_predictions_df.to_csv ('test_predictions.csv') ```
github_jupyter
def warn(*args, **kwargs): pass import warnings warnings.warn = warn # to remove warnings import pandas as pd, numpy as np, matplotlib.pyplot as plt % matplotlib notebook original_data = pd.read_csv ('train.csv') final_test_set = pd.read_csv ('test.csv') ''' The target values aren't given (they're on the Kaggle Website), so the final_test_set is not used to check for overfitting. ''' original_data.head() original_data.shape description = original_data.describe() description.round (2) def create_dict_of_var_and_description (file_name): ''' This is used to create a dictionary from the Kaggle document that describes all the variables ''' open_file = open(file_name) file = open_file.read() open_file.close() dict = {} for var_descrip in file.split('\n'): var, description = var_descrip.split(',', 1) dict[var] = description return dict var_file_name = 'Var_name_and_description' dict_of_var = create_dict_of_var_and_description (var_file_name) dict_of_var['v2a1'] # example def find_columns_with_string_values (df): ''' This prints the variable that contains strings (it can contain strings and numbers), the description, and the unique values. ''' dtypes_of_data = pd.DataFrame (df.dtypes) list_of_str_var = dtypes_of_data [dtypes_of_data == object].dropna().index for var in list_of_str_var[1:]: # ignore the first column 'Id' because that was not in the Kaggle Variable File print (var, ':', dict_of_var[var], '\n', df[var].unique()) find_columns_with_string_values (original_data) from sklearn.model_selection import train_test_split train, test = train_test_split (original_data, test_size=0.2, random_state=42) X, y = train.drop('Target', axis=1), train['Target'].copy() X_test, y_test = test.drop('Target', axis=1), test['Target'].copy() def deal_with_string_values (X): ''' For example, the columns that contain no and yes are dependency, edjefe, edjefa. For edjefe and edjefa, yes means 1, and no means 0. For dependency, We assume that when people answered the survey, no means 0 and yes means the average. ''' replaced_strings_df = X.replace ('no', 0).replace ('yes', np.nan).drop (['Id', 'idhogar'], axis=1) replaced_strings_df = replaced_strings_df.fillna ({'edjefe': 1, 'edjefa': 1}) from sklearn.preprocessing import Imputer imputer = Imputer(strategy='median') replace_NA_with_median = imputer.fit_transform (replaced_strings_df) return pd.DataFrame (replace_NA_with_median, columns = replaced_strings_df.columns) def drop_variables_with_low_corr_with_target (X, original_data, threshold): ''' If the correlation of a certain variable with the target is less than the threshold, we drop the variables from the dataframe''' train = original_data.copy() corr_matrix = train.corr() corr_target = corr_matrix['Target'].sort_values(ascending=False) over_threshold_var = corr_target[corr_target.abs() < threshold].index return X.drop (over_threshold_var, axis=1), over_threshold_var def Clean_Data (X, original_data, threshold): ''' Combines the above two functions, and returns a dataframe and the dropped variables (so we can drop the same variables during the test set, and the final test set)''' replace_string_transform = deal_with_string_values (X) drop_low_corr, dropped_var = drop_variables_with_low_corr_with_target (replace_string_transform, original_data, threshold) return drop_low_corr, dropped_var ''' We use standard scaler to account for different scales, and imputer to save the median. As a result, we could transform the test data using the median, variance, and mean from the train set, and not that of the test set. 
''' from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer, StandardScaler trans_pipeline = Pipeline([ ('imputer', Imputer(strategy='median')), ('std_scaler', StandardScaler()) ]) X_Cleaned, Dropped_Var = Clean_Data (X, original_data, .1) X_Cleaned_and_transformed = pd.DataFrame (trans_pipeline.fit_transform (X_Cleaned), columns = X_Cleaned.columns) X_Cleaned_and_transformed.head() def display_var_with_missing_values (description): var = description.copy() count = description.loc['count'] with pd.option_context('display.max_rows', None, 'display.max_columns', 3): return pd.DataFrame ((count [count != 9557.0])) def get_description_of_values_of_var_with_missing_values(description, file_name): df_of_var_with_missing_values = display_var_with_missing_values (description) list_of_var = list (display_var_with_missing_values (description).index) dict_of_var = create_dict_of_var_and_description (file_name) for var in list_of_var: print (var, ':', dict_of_var[var]) df_of_var_with_missing_values = display_var_with_missing_values (description) df_of_var_with_missing_values X_clean_descr = X_Cleaned.describe() X_clean_descr.round(4) ''' Now that the data is cleaned, we move onto training models. We will use simple models: - stochastic gradient descent for regression. - support vector machine (both classifier and regression (linear and polynomial)). - decision trees (both classifier and regression). Note that we could train both classifications and regressions because the target values are household poverty levels (output values are 1, 2, 3, 4). To train the models, we need to find the right parameters for each model and make sure the models do not overfit the data. Therefore, we use cross validation grid search. Finding the right parameters will require trial and error. In addition, the randomness of the models may produce different parameters if the kernel is reset. After we train the models, we will use the ensemble method to combine all the models. But before we do that, we have to make sure that the errors that each model makes are uncorrelated with one another, elsewise models will make the same mistake, allowing majorities for the wrong class. 
''' print ('') from sklearn.model_selection import GridSearchCV # X_Cleaned_and_transformed for X def find_best_param_from_Grid_Search (model, param_grid, scoring, X, y): grid_search = GridSearchCV(model, param_grid, cv=5, scoring=scoring) grid_search.fit(X, y ) return grid_search.best_params_ # scoring types f1 = 'f1_macro' # this is the scoring type of the Kaggle Competition (only for classifiers) mse = 'neg_mean_squared_error' from sklearn.linear_model import SGDRegressor ''' l2: Ridge Regression (reduce overfitting) alpha: parameter to tune for l2 eta0: Learning rate n_iter: number of times the model runs to train ''' sgd_reg = SGDRegressor(penalty='l2') sgd_param_grid = [ {'alpha': [.00020, .00025, .00030 ], 'n_iter': [100, 150, 200], 'eta0': [.001, .005, .01] } ] sgd_best_param = find_best_param_from_Grid_Search (sgd_reg, sgd_param_grid, mse, X_Cleaned_and_transformed, y) sgd_best_param sgd_reg = SGDRegressor(penalty='l2', n_iter=200 , eta0= .005, alpha= 0.0002) sgd_reg.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import LinearSVR # linear support vector regression svm_reg = LinearSVR() svm_param_grid = [ {'C': [.5, 1, 1.5], 'epsilon': [.1, .15, .2]} ] svm_best_param = find_best_param_from_Grid_Search (svm_reg, svm_param_grid, mse, X_Cleaned_and_transformed, y) svm_best_param svm_reg = LinearSVR(C=.5, epsilon=.2) svm_reg.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import LinearSVC svc_clf = LinearSVC(loss='hinge') svc_param_grid = [ {'C': [.5, 1, 1.5]} ] svc_best_param = find_best_param_from_Grid_Search (svc_clf, svc_param_grid, f1, X_Cleaned_and_transformed, y) svc_best_param svc_clf = LinearSVC(loss='hinge', C=1) svc_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.svm import SVC svc_poly_clf = SVC(kernel='rbf') # polynomial classfier with radial basis function svc_poly_param_grid = [ {'C': [.5, 1, 1.5], 'gamma': [10, 20, 30]} ] svc_best_param = find_best_param_from_Grid_Search (svc_poly_clf, svc_poly_param_grid, f1, X_Cleaned_and_transformed, y) svc_best_param svc_poly_clf = SVC(kernel='rbf', C=1.5, gamma=10) svc_poly_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.tree import DecisionTreeClassifier tree_clf = DecisionTreeClassifier() tree_param_grid = [{ 'max_depth': [20, 25, 30], 'min_samples_leaf': [35, 40, 45], 'max_leaf_nodes': [40, 45, 50], 'max_features': [25, 30, 35] }] tree_clf_best_param = find_best_param_from_Grid_Search (tree_clf, tree_param_grid, f1, X_Cleaned_and_transformed, y) tree_clf_best_param tree_clf = DecisionTreeClassifier(max_depth = 30, max_features = 35, max_leaf_nodes=50, min_samples_leaf=35) tree_clf.fit (X_Cleaned_and_transformed, y ) from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor() tree_reg_param_grid = [{ 'max_depth': [35, 40, 45], 'min_samples_leaf': [25, 30, 35], 'max_leaf_nodes': [60, 65, 70], 'max_features': [20, 25, 30] }] tree_reg_best_param = find_best_param_from_Grid_Search (tree_reg, tree_reg_param_grid, mse, X_Cleaned_and_transformed, y) tree_reg_best_param tree_reg = DecisionTreeRegressor(max_depth = 35, max_features = 30, max_leaf_nodes=65, min_samples_leaf=25) tree_reg.fit (X_Cleaned_and_transformed, y ) # Now to create an ensemble. 
models = [sgd_reg, svm_reg, svc_clf, svc_poly_clf, tree_clf, tree_reg] model_names = ['sgd_reg', 'svm_reg', 'svc_clf', 'svc_poly_clf', 'tree_clf', 'tree_reg'] some_data = X_test.copy() labels = y_test.copy() def hard_vote_ensemble_clf (models, model_names, some_data): prediction_df = pd.DataFrame({'FILLER' : [np.nan]}) for i, model in enumerate( models): model_prediction = pd.DataFrame (model.predict(some_data), columns=[model_names[i]]) prediction_df = pd.concat([prediction_df, model_prediction], axis=1) prediction_df.drop ('FILLER', axis=1, inplace=True) # Round the regressors (and if greater than 5, set the value to 4). prediction_df['Target'] = prediction_df.mean(axis=1).round() prediction_df[prediction_df > 4] = 4 # get only the average (result of the ensemble), and the entire dataframe of predictions return prediction_df['Target'], prediction_df.round() ensemble, entire_ensemble = hard_vote_ensemble_clf (models, model_names, X_Cleaned_and_transformed) entire_ensemble.head() residual = entire_ensemble.sub(y.reset_index().drop('index', axis=1).squeeze(), axis=0) residual.corr() # None of the correlations are greater than .9, so we do not need to drop any of the models. from sklearn.metrics import confusion_matrix conf_mx = confusion_matrix(ensemble, y) # each row is the actual class, each column is the predicted class print (conf_mx) ''' Now we use the ensemble classifier to predict the test set. ''' def clean_predict (some_data, Dropped_Var): replaced_strings_df = some_data.replace ('no', 0).replace ('yes', np.nan).drop (['Id', 'idhogar'], axis=1) replaced_strings_df = replaced_strings_df.fillna ({'edjefe': 1, 'edjefa': 1}) dropped = replaced_strings_df.drop (Dropped_Var, axis=1) some_data_prepared = trans_pipeline.transform(dropped) df = pd.DataFrame (some_data_prepared, columns = dropped.columns) return df def hard_vote_ensemble_clf_test (models, model_names, some_data, Dropped_Var): df = clean_predict (some_data, Dropped_Var) ensemble, entire_ensemble = hard_vote_ensemble_clf (models, model_names, df) return ensemble ensemble_test = hard_vote_ensemble_clf_test (models, model_names, some_data, Dropped_Var ) conf_mx_test = confusion_matrix (ensemble_test, labels) conf_mx_test ''' Now to predict the final set without target labels ''' final_ensemble_predict = hard_vote_ensemble_clf_test (models, model_names, final_test_set, Dropped_Var ) final_predictions_df = pd.DataFrame ({ 'Id': final_test_set['Id'], 'Target': final_ensemble_predict }) final_predictions_df = final_predictions_df.set_index ('Id') final_predictions_df.head() final_predictions_df.to_csv ('test_predictions.csv')
0.63114
0.547525
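The hard-vote ensemble above averages the model outputs, rounds to the nearest class, and caps values above 4. A compact sketch of that combination rule on hypothetical predictions, using `clip` so both ends of the 1-4 range are enforced; the model names and numbers are invented.

```
import pandas as pd

# Hypothetical predictions from three of the models for three households.
preds = pd.DataFrame({'sgd_reg': [3.4, 1.2, 4.7],
                      'svc_clf': [3, 1, 4],
                      'tree_reg': [3.1, 2.0, 4.9]})

# Average across models, round to the nearest poverty class, and keep the
# result inside the valid 1-4 range.
ensemble = preds.mean(axis=1).round().clip(1, 4).astype(int)
print(ensemble.tolist())   # [3, 1, 4]
```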
``` import numpy as np def rediagonalization(current_matrix, coupling_matrix, type = 'sensing'): """Refine existing diagonalization matrix given a coupling matrix. Parameters ---------- current_matrix: list of (list of int/float) or 2D numpy.ndarray The matrix to be refined. coupling_matrix: list of (list of int/float) or 2D numpy.ndarray The measured coupling. For sensing matrix, coupling_matrix[i][j] is the coupling ratio between the i-th component and j-th component of the displacement vector. Hence, the diagonal elements should be 1. For actuation matrix, coupling matrix[i][j] is the ratio between the i-th component of displacement and j-th component in actuation. The diagonal elements should be comparable to the actuation efficiency of the particular DoF. type: string Specifying what kind of matrix we are diagonalizing. Either 'sensing' or 'actuation' Returns ------- numpy.ndarray The refined diagonalziation matrix. """ current_matrix = np.array(current_matrix) coupling_matrix = np.array(coupling_matrix) diagonal_coupling = np.array(np.diag(np.diag(coupling_matrix))) if type == 'sensing': new_matrix = np.matmul(np.linalg.inv(coupling_matrix), current_matrix) return(new_matrix) elif type == 'actuation': new_matrix = np.matmul(current_matrix, np.matmul(np.linalg.inv(coupling_matrix), diagonal_coupling)) return(new_matrix) else: print("Please specify type, either 'sensing' or 'actuation'.") return(None) # sensing SRM OL2EUL ol2eul=np.array([[-0.04892, -0.03073, -3.8702, 0], [2.56819, 0.05846, 1.26304, 0], [-0.02427, 3.28096, -0.01598, 0]]) coupling_matrix = np.array([[1, -0.0174, -0.00243], [0, 1, 0.0029], [0.0256, 0.00288, 1]]) C = np.array([[1,0,0],[(-69.3216+78.0996)/(120.5889-17.8238),1,0],[0,0,1]]) # np.matmul(C,coupling_matrix) # (-69.3216+78.0996)/(120.5889-17.8238) new_ol2eul = rediagonalization(current_matrix = ol2eul, coupling_matrix = coupling_matrix, type = 'sensing') new_ol2eul = rediagonalization(current_matrix = new_ol2eul, coupling_matrix = C, type = 'sensing') new_ol2eul # actuation SRM TM_EUL2OSEM coupling_matrix = np.array([[0.0003736428499, 0.00017566015, -2.71734459e-6], [-4.90803115e-5, 0.0184034, 6.434345769e-5], [-5.651684397e-5, 0.0002424936, 0.00703429]]) coupling_matrix = coupling_matrix.T eul2osem = np.array([[0.282, 1.128, -1.128], [0.25, -1, -1], [0.242, -0.968, 0.968], [0.231, 0.924, 0.924]]) new_eul2osem = rediagonalization(current_matrix=eul2osem, coupling_matrix=coupling_matrix, type = 'actuation') new_eul2osem np.ones((2,3)) ```
github_jupyter
import numpy as np def rediagonalization(current_matrix, coupling_matrix, type = 'sensing'): """Refine existing diagonalization matrix given a coupling matrix. Parameters ---------- current_matrix: list of (list of int/float) or 2D numpy.ndarray The matrix to be refined. coupling_matrix: list of (list of int/float) or 2D numpy.ndarray The measured coupling. For sensing matrix, coupling_matrix[i][j] is the coupling ratio between the i-th component and j-th component of the displacement vector. Hence, the diagonal elements should be 1. For actuation matrix, coupling matrix[i][j] is the ratio between the i-th component of displacement and j-th component in actuation. The diagonal elements should be comparable to the actuation efficiency of the particular DoF. type: string Specifying what kind of matrix we are diagonalizing. Either 'sensing' or 'actuation' Returns ------- numpy.ndarray The refined diagonalziation matrix. """ current_matrix = np.array(current_matrix) coupling_matrix = np.array(coupling_matrix) diagonal_coupling = np.array(np.diag(np.diag(coupling_matrix))) if type == 'sensing': new_matrix = np.matmul(np.linalg.inv(coupling_matrix), current_matrix) return(new_matrix) elif type == 'actuation': new_matrix = np.matmul(current_matrix, np.matmul(np.linalg.inv(coupling_matrix), diagonal_coupling)) return(new_matrix) else: print("Please specify type, either 'sensing' or 'actuation'.") return(None) # sensing SRM OL2EUL ol2eul=np.array([[-0.04892, -0.03073, -3.8702, 0], [2.56819, 0.05846, 1.26304, 0], [-0.02427, 3.28096, -0.01598, 0]]) coupling_matrix = np.array([[1, -0.0174, -0.00243], [0, 1, 0.0029], [0.0256, 0.00288, 1]]) C = np.array([[1,0,0],[(-69.3216+78.0996)/(120.5889-17.8238),1,0],[0,0,1]]) # np.matmul(C,coupling_matrix) # (-69.3216+78.0996)/(120.5889-17.8238) new_ol2eul = rediagonalization(current_matrix = ol2eul, coupling_matrix = coupling_matrix, type = 'sensing') new_ol2eul = rediagonalization(current_matrix = new_ol2eul, coupling_matrix = C, type = 'sensing') new_ol2eul # actuation SRM TM_EUL2OSEM coupling_matrix = np.array([[0.0003736428499, 0.00017566015, -2.71734459e-6], [-4.90803115e-5, 0.0184034, 6.434345769e-5], [-5.651684397e-5, 0.0002424936, 0.00703429]]) coupling_matrix = coupling_matrix.T eul2osem = np.array([[0.282, 1.128, -1.128], [0.25, -1, -1], [0.242, -0.968, 0.968], [0.231, 0.924, 0.924]]) new_eul2osem = rediagonalization(current_matrix=eul2osem, coupling_matrix=coupling_matrix, type = 'actuation') new_eul2osem np.ones((2,3))
0.861217
0.851398
# NYC Taxi Trips Example This data is freely available. You can find some interesting background information at https://chriswhong.com/open-data/foil_nyc_taxi/ . We will use this data to perform some analytical tasks. The whole workshop is split up into multiple sections, which reflect the typical data processing flow in a data-centric project. We will follow the (simplified) steps of working with a data lake. 1. Build "Structured Zone" containing all sources 2. Build "Refined Zone" that contains pre-processed data 3. Analyze the data before working on the next steps to find an appropriate approach 4. Build "Integrated Zone" that contains integrated data 5. Use Machine Learning for business questions ## Requirements The workshop will require the following Python packages: * PySpark (tested with Spark 2.4) * Matplotlib * Pandas * GeoPandas * Cartopy * Contextily # Part 1 - Build Structured Zone The first part is about building the structured zone. It will contain a copy of the raw data stored in Hive tables, which makes it easily accessible for downstream processing. ``` taxi_basedir = "s3://dimajix-training/data/nyc-taxi-trips/" weather_basedir = "s3://dimajix-training/data/weather/" holidays_basedir = "s3://dimajix-training/data/bank-holidays/" ``` # 0 Create Spark Session Before we begin, we create a Spark session if none was provided in the notebook. ``` import pyspark.sql.functions as f from pyspark.sql import SparkSession if not 'spark' in locals(): spark = ( SparkSession.builder.master("local[*]") .config("spark.driver.memory", "64G") .getOrCreate() ) spark ``` # 1 Taxi Data This data is freely available. You can find some interesting background information at https://chriswhong.com/open-data/foil_nyc_taxi/ . In the first step we read in the raw data. The data is split into two different entities: basic trip information and payment information. We will store the data in a more efficient representation (Parquet) to form the structured zone. ## 1.1 Trip Information We start with reading in the trip information. It contains the following columns: * **medallion** - This is some sort of a license for a taxi company. A single medallion is attached to a single cab and may be used by multiple drivers. * **hack_license** - This is the driver's license * **vendor_id** * **rate_code** The final rate code in effect at the end of the trip. * 1=Standard rate * 2=JFK * 3=Newark * 4=Nassau or Westchester * 5=Negotiated fare * 6=Group ride * **store_and_fwd_flag** This flag indicates whether the trip record was held in vehicle memory before sending to the vendor, aka “store and forward,” because the vehicle did not have a connection to the server * **pickup_datetime** This is the time when a passenger was picked up * **dropoff_datetime** This is the time when the passenger was dropped off again * **passenger_count** Number of passengers of this trip * **trip_time_in_secs** * **trip_distance** * **pickup_longitude** * **pickup_latitude** * **dropoff_longitude** * **dropoff_latitude** The primary key uniquely identifying each trip is given by the columns `medallion`, `hack_license`, `vendor_id` and `pickup_datetime`.
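As an optional sanity check (once `trip_data` has been created in the cells below), one could verify that this key really is unique by counting duplicated key combinations. A minimal sketch, not part of the original workshop:
```
# Optional sanity check: count key combinations that appear more than once
# (assumes trip_data from the next cell and the pyspark.sql.functions alias f from above).
key_cols = ["medallion", "hack_license", "vendor_id", "pickup_datetime"]
duplicate_keys = (
    trip_data.groupBy(key_cols)
    .count()
    .filter(f.col("count") > 1)
)
print("Number of duplicated primary keys:", duplicate_keys.count())
```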
``` from pyspark.sql.types import * trip_schema = StructType( [ StructField('medallion', StringType()), StructField('hack_license', StringType()), StructField('vendor_id', StringType()), StructField('rate_code', StringType()), StructField('store_and_fwd_flag', StringType()), StructField('pickup_datetime', TimestampType()), StructField('dropoff_datetime', TimestampType()), StructField('passenger_count', IntegerType()), StructField('trip_time_in_secs', IntegerType()), StructField('trip_distance', DoubleType()), StructField('pickup_longitude', DoubleType()), StructField('pickup_latitude', DoubleType()), StructField('dropoff_longitude', DoubleType()), StructField('dropoff_latitude', DoubleType()), ] ) # Read in the data into a PySpark DataFrame using the schema above # location: taxi_basedir + "/data/" # format: csv # header: True trip_data = ( spark.read.option("header", True).schema(trip_schema).csv(taxi_basedir + "/data/") ) ``` Inspect the first 10 rows by converting them to a Pandas DataFrame. ``` trip_data.limit(10).toPandas() ``` ### Inspect Schema Just to be sure, let us inspect the schema. It should match exactly the specified one. ``` trip_data.printSchema() ``` ### Write into Structured Zone Now we store data into Hive tables as parquet files. In order to do that, we first create an empty Hive database "taxi", in order to reflect the source of the data. ``` spark.sql("CREATE DATABASE IF NOT EXISTS taxi") # Write the DataFrame trip_data into Hive by using the method `saveAsTable` # format: parquet # Hive table: taxi.trip trip_data.write.format("parquet").saveAsTable("taxi.trip") ``` ## 1.2 Fare information Now we read in the second table containing the trips fare information. * **medallion** - This is some sort of a license for a taxi company * **hack_license** - This is the drivers license * **vendor_id** * **pickup_datetime** This is the time when a passenger was picked up * **payment_type** A numeric code signifying how the passenger paid for the trip. * CRD = Credit card * CDH = Cash * ??? = No charge * ??? = Dispute * ??? = Unknown * ??? = Voided trip * **fare_amount** The time-and-distance fare calculated by the meter * **surcharge** * **mta_tax** $0.50 MTA tax that is automatically triggered based on the metered rate in use * **tip_amount** Tip amount –This field is automatically populated for credit card tips. Cash tips are not included * **tolls_amount** Total amount of all tolls paid in trip * **total_amount** The total amount charged to passengers. Does not include cash tips. 
``` fare_schema = StructType( [ StructField('medallion', StringType()), StructField('hack_license', StringType()), StructField('vendor_id', StringType()), StructField('pickup_datetime', TimestampType()), StructField('payment_type', StringType()), StructField('fare_amount', DoubleType()), StructField('surcharge', DoubleType()), StructField('mta_tax', DoubleType()), StructField('tip_amount', DoubleType()), StructField('tolls_amount', DoubleType()), StructField('total_amount', DoubleType()), ] ) trip_fare = ( spark.read.option("header", True) .option("ignoreLeadingWhiteSpace", True) .schema(fare_schema) .csv(taxi_basedir + "/fare/") ) trip_fare.limit(10).toPandas() ``` ### Inspect Schema Let us inspect the schema of the data, which should match exactly the schema that we originally specified ``` trip_fare.printSchema() ``` ### Store into Structured Zone Finally store the data into the structured zone as Parquet files into the sub directory `taxi-fare` ``` trip_fare.write.format("parquet").saveAsTable("taxi.fare") ``` # 2. Weather Data In order to improve our analysis, we will relate the taxi trips with weather information. We use the NOAA ISD weather data (https://www.ncdc.noaa.gov/isd), which contains measurements from many stations around the world, some of them dating back to 1901. You can download all data from ftp://ftp.ncdc.noaa.gov/pub/data/noaa . We will only use a small subset of the data which is good enough for our purposes. ## 2.1 Station Master Data The weather data is split up into two different data sets: the measurements themselves and meta data about the stations. The later contains valuable information like the geo location of the weather station. This will be useful when trying to find the weather station nearest to all taxi trips. Among other data the columns provide specifically the following informations * **USAF** & **WBAN** - weather station id * **CTRY** - the country of the weather station * **STATE** - the state of the weather station * **LAT** & **LONG** - latitude and longitude of the weather station (geo coordinates) * **BEGIN** & **END** - date range when this weather station was active ``` # Read in weather station master data into a PySpark DataFrame weather_stations # location: weather_basedir + "/isd-history/" # format: csv # header: True weather_stations = spark.read.option("header", True).csv( weather_basedir + "/isd-history/" ) weather_stations.limit(10).toPandas() ``` ### Store data into Structured Zone In the next step we want to store the data as Parquet files (which are much more efficient and very well supported by most batch frameworks in the Hadoop and Spark universe). In order to do so, we first need to rename some columns, which contain unsupported characters: * "STATION NAME" => "STATION_NAME" * "ELEV(M)" => "ELEVATION" After the columns have been renamed, the data frame is written into the structured zone into the Hive table `isd.stations` using the `DataFrame.write.saveAsTable` function. But we also need to take care of creating the Hive database `isd` first. ``` # Create Hive database "isd" spark.sql("CREATE DATABASE IF NOT EXISTS isd") # Write stations into Hive table "isd.stations" weather_stations.withColumnRenamed("STATION NAME", "STATION_NAME").withColumnRenamed( "ELEV(M)", "ELEVATION" ).write.format("parquet").saveAsTable("isd.stations") ``` ### Read in data agin Using the `spark.read.parquet` function we read in the data back into Spark and display some records. 
``` weather_stations = spark.read.table("isd.stations") weather_stations.limit(10).toPandas() ``` ## 2.2 Weather Measurements Now we will work with the second and more interesting part of the NOAA weather data set: The measurements. These are stored in different subdirectories per year. For us, the year 2013 is good enough, since the taxi trips are all from 2013. The data format is a proprietary ASCII encoding, so we use the `spark.read.text` method to read each line as one record. ``` # Read raw measurements into PySpark DataFrame raw_weather # location: weather_basedir + "/2013" # format: text raw_weather = spark.read.text(weather_basedir + "/2013") raw_weather.limit(10).toPandas() ``` ### Extract precipitation Now we extract the precipitation from the measurements. This is not trivial, since that information is stored in a variable part. We assume that the record contains precipitation data when it contains the substring `AA1` at position 109. This denotes the type of the subsection in the data record followed by the number of hours of this measurement and the precipitation depth. We use some PySpark string functions to extract the data. ``` raw_weather.select( f.substring(raw_weather["value"], 106, 999), f.instr(raw_weather["value"], "AA1").alias("s"), f.when( f.instr(raw_weather["value"], "AA1") == 109, f.substring(raw_weather["value"], 109 + 3, 8), ).alias("AAD"), ).withColumn( "precipitation_hours", f.substring(f.col("AAD"), 1, 2).cast("INT") ).withColumn( "precipitation_depth", f.substring(f.col("AAD"), 3, 4).cast("FLOAT") ).filter( f.col("precipitation_depth") > 0 ).limit( 10 ).toPandas() ``` ### Extract all relevant measurements The precipitation was the hardest part. Other measurements like wind speed and air temperature are stored at fixed positions together with some quality flags denoting if a measurement is valid. In the following statement, we extract all relevant measurements. 
Specifically we extract the following information * **USAF** & **WBAN** - weather station identifier * **ts** - timestamp of measurement * **wind_direction** - wind direction (in degrees) * **wind_direction_qual** - quality flag of the wind direction * **wind_speed** - wind speed * **wind_speed_qual** - quality flag indicating the validity of the wind speed * **air_temperature** - air temperature in degree Celsius * **air_temperature_qual** - quality flag for air temperature * **precipitation_hours** * **precipitation_depth** ``` weather = ( raw_weather.select( f.substring(raw_weather["value"], 5, 6).alias("usaf"), f.substring(raw_weather["value"], 11, 5).alias("wban"), f.to_timestamp(f.substring(raw_weather["value"], 16, 12), "yyyyMMddHHmm").alias( "ts" ), f.substring(raw_weather["value"], 42, 5).alias("report_type"), f.substring(raw_weather["value"], 61, 3).alias("wind_direction"), f.substring(raw_weather["value"], 64, 1).alias("wind_direction_qual"), f.substring(raw_weather["value"], 65, 1).alias("wind_observation"), (f.substring(raw_weather["value"], 66, 4).cast("float") / 10.0).alias( "wind_speed" ), f.substring(raw_weather["value"], 70, 1).alias("wind_speed_qual"), (f.substring(raw_weather["value"], 88, 5).cast("float") / 10.0).alias( "air_temperature" ), f.substring(raw_weather["value"], 93, 1).alias("air_temperature_qual"), f.when( f.instr(raw_weather["value"], "AA1") == 109, f.substring(raw_weather["value"], 109 + 3, 8), ).alias("AAD"), ) .withColumn("precipitation_hours", f.substring(f.col("AAD"), 1, 2).cast("INT")) .withColumn("precipitation_depth", f.substring(f.col("AAD"), 3, 4).cast("FLOAT")) .withColumn("date", f.to_date(f.col("ts"))) .drop("AAD") ) weather.limit(10).toPandas() ``` ### Store into Structured Zone After successful extraction, we write the result again into the structured zone into Hive table `isd.weather`. Since we originally have weather measurements for different years, we create a partitioned Hive table - although we are only interested in the weather of 2013. Unfortunately the support for writing into partitioned Hive tables is currently rather limited within the PySpark API. But we can perform everything using Spark SQL instead of the PySpark API. #### Create partitioned table As mentioned above, we cannot easily create a partitioned table using the PySpark API. But we can create one using Spark SQL. We create the required SQL statement dynamically from the given schema (but with a fixed partition column) ``` columns = [f.name + " " + f.dataType.simpleString() for f in weather.schema.fields] sql = ( "CREATE TABLE IF NOT EXISTS isd.weather(" + ",".join(columns) + ") PARTITIONED BY(year INT)" ) print(sql) spark.sql(sql) ``` #### Store data into partition In order to write into a single partition, we again need to use Spark SQL. Therefore we register the weather data of 2013 as a namedtemporary view for SQL access and then insert all records into one specific partition. ``` weather.createOrReplaceTempView("weather_2013") spark.sql( """ INSERT OVERWRITE TABLE isd.weather PARTITION(year=2013) SELECT * FROM weather_2013 """ ) ``` ### Read in from Structured Zone Again we read back the data from the Hive table `isd.weather` and display 10 first records. ``` weather = spark.read.table("isd.weather") weather.limit(10).toPandas() ``` # 3. Holidays Another important data source is additional date information, specifically if a certain date is a bank holiday. 
While other information like week days can be directly computed from a date, for bank holidays an additional source is required. We follow again the same approach of reading in the raw data and storing it into the structured zone as Parquet files. ``` holidays_schema = StructType( [ StructField('id', IntegerType()), StructField('date', DateType()), StructField('description', StringType()), StructField('bank_holiday', BooleanType()), ] ) holidays = ( spark.read.option("header", False).schema(holidays_schema).csv(holidays_basedir) ) holidays.limit(10).toPandas() ``` Again let us inspect the schema ``` holidays.printSchema() ``` ### Store into Structured Zone Same game. Let us create a new database `ref` for simple reference table and let us store the holidays into a table `ref.holidays`. ``` # Create Hive database ref spark.sql("CREATE DATABASE IF NOT EXISTS ref") holidays.write.format("parquet").saveAsTable("ref.holidays") ``` ### Read in from Structured Zone Again let us check if writing was successful. ``` holidays = spark.read.table("ref.holidays") holidays.limit(10).toPandas() ```
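As noted in the weather section above, the notebook falls back to Spark SQL for writing into a partitioned Hive table. Depending on the Spark version, a DataFrame-API alternative can look like the sketch below; it reuses `weather` and the `f` alias from the cells above, and the table name `isd.weather_alt` is hypothetical (chosen to avoid clobbering `isd.weather`). Treat it as an illustration rather than a drop-in replacement:
```
# Alternative to the SQL-based partition write (Spark >= 2.3; behaviour may vary by version).
# Add the partition column explicitly, then let Spark create and write the partitioned table.
weather_2013 = weather.withColumn("year", f.lit(2013))

(
    weather_2013.write
    .partitionBy("year")              # one sub-directory / Hive partition per year
    .format("parquet")
    .mode("append")
    .saveAsTable("isd.weather_alt")   # hypothetical table name
)
```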
github_jupyter
taxi_basedir = "s3://dimajix-training/data/nyc-taxi-trips/" weather_basedir = "s3://dimajix-training/data/weather/" holidays_basedir = "s3://dimajix-training/data/bank-holidays/" import pyspark.sql.functions as f from pyspark.sql import SparkSession if not 'spark' in locals(): spark = ( SparkSession.builder.master("local[*]") .config("spark.driver.memory", "64G") .getOrCreate() ) spark from pyspark.sql.types import * trip_schema = StructType( [ StructField('medallion', StringType()), StructField('hack_license', StringType()), StructField('vendor_id', StringType()), StructField('rate_code', StringType()), StructField('store_and_fwd_flag', StringType()), StructField('pickup_datetime', TimestampType()), StructField('dropoff_datetime', TimestampType()), StructField('passenger_count', IntegerType()), StructField('trip_time_in_secs', IntegerType()), StructField('trip_distance', DoubleType()), StructField('pickup_longitude', DoubleType()), StructField('pickup_latitude', DoubleType()), StructField('dropoff_longitude', DoubleType()), StructField('dropoff_latitude', DoubleType()), ] ) # Read in the data into a PySpark DataFrame using the schema above # location: taxi_basedir + "/data/" # format: csv # header: True trip_data = ( spark.read.option("header", True).schema(trip_schema).csv(taxi_basedir + "/data/") ) trip_data.limit(10).toPandas() trip_data.printSchema() spark.sql("CREATE DATABASE IF NOT EXISTS taxi") # Write the DataFrame trip_data into Hive by using the method `saveAsTable` # format: parquet # Hive table: taxi.trip trip_data.write.format("parquet").saveAsTable("taxi.trip") fare_schema = StructType( [ StructField('medallion', StringType()), StructField('hack_license', StringType()), StructField('vendor_id', StringType()), StructField('pickup_datetime', TimestampType()), StructField('payment_type', StringType()), StructField('fare_amount', DoubleType()), StructField('surcharge', DoubleType()), StructField('mta_tax', DoubleType()), StructField('tip_amount', DoubleType()), StructField('tolls_amount', DoubleType()), StructField('total_amount', DoubleType()), ] ) trip_fare = ( spark.read.option("header", True) .option("ignoreLeadingWhiteSpace", True) .schema(fare_schema) .csv(taxi_basedir + "/fare/") ) trip_fare.limit(10).toPandas() trip_fare.printSchema() trip_fare.write.format("parquet").saveAsTable("taxi.fare") # Read in weather station master data into a PySpark DataFrame weather_stations # location: weather_basedir + "/isd-history/" # format: csv # header: True weather_stations = spark.read.option("header", True).csv( weather_basedir + "/isd-history/" ) weather_stations.limit(10).toPandas() # Create Hive database "isd" spark.sql("CREATE DATABASE IF NOT EXISTS isd") # Write stations into Hive table "isd.stations" weather_stations.withColumnRenamed("STATION NAME", "STATION_NAME").withColumnRenamed( "ELEV(M)", "ELEVATION" ).write.format("parquet").saveAsTable("isd.stations") weather_stations = spark.read.table("isd.stations") weather_stations.limit(10).toPandas() # Read raw measurements into PySpark DataFrame raw_weather # location: weather_basedir + "/2013" # format: text raw_weather = spark.read.text(weather_basedir + "/2013") raw_weather.limit(10).toPandas() raw_weather.select( f.substring(raw_weather["value"], 106, 999), f.instr(raw_weather["value"], "AA1").alias("s"), f.when( f.instr(raw_weather["value"], "AA1") == 109, f.substring(raw_weather["value"], 109 + 3, 8), ).alias("AAD"), ).withColumn( "precipitation_hours", f.substring(f.col("AAD"), 1, 2).cast("INT") ).withColumn( 
"precipitation_depth", f.substring(f.col("AAD"), 3, 4).cast("FLOAT") ).filter( f.col("precipitation_depth") > 0 ).limit( 10 ).toPandas() weather = ( raw_weather.select( f.substring(raw_weather["value"], 5, 6).alias("usaf"), f.substring(raw_weather["value"], 11, 5).alias("wban"), f.to_timestamp(f.substring(raw_weather["value"], 16, 12), "yyyyMMddHHmm").alias( "ts" ), f.substring(raw_weather["value"], 42, 5).alias("report_type"), f.substring(raw_weather["value"], 61, 3).alias("wind_direction"), f.substring(raw_weather["value"], 64, 1).alias("wind_direction_qual"), f.substring(raw_weather["value"], 65, 1).alias("wind_observation"), (f.substring(raw_weather["value"], 66, 4).cast("float") / 10.0).alias( "wind_speed" ), f.substring(raw_weather["value"], 70, 1).alias("wind_speed_qual"), (f.substring(raw_weather["value"], 88, 5).cast("float") / 10.0).alias( "air_temperature" ), f.substring(raw_weather["value"], 93, 1).alias("air_temperature_qual"), f.when( f.instr(raw_weather["value"], "AA1") == 109, f.substring(raw_weather["value"], 109 + 3, 8), ).alias("AAD"), ) .withColumn("precipitation_hours", f.substring(f.col("AAD"), 1, 2).cast("INT")) .withColumn("precipitation_depth", f.substring(f.col("AAD"), 3, 4).cast("FLOAT")) .withColumn("date", f.to_date(f.col("ts"))) .drop("AAD") ) weather.limit(10).toPandas() columns = [f.name + " " + f.dataType.simpleString() for f in weather.schema.fields] sql = ( "CREATE TABLE IF NOT EXISTS isd.weather(" + ",".join(columns) + ") PARTITIONED BY(year INT)" ) print(sql) spark.sql(sql) weather.createOrReplaceTempView("weather_2013") spark.sql( """ INSERT OVERWRITE TABLE isd.weather PARTITION(year=2013) SELECT * FROM weather_2013 """ ) weather = spark.read.table("isd.weather") weather.limit(10).toPandas() holidays_schema = StructType( [ StructField('id', IntegerType()), StructField('date', DateType()), StructField('description', StringType()), StructField('bank_holiday', BooleanType()), ] ) holidays = ( spark.read.option("header", False).schema(holidays_schema).csv(holidays_basedir) ) holidays.limit(10).toPandas() holidays.printSchema() # Create Hive database ref spark.sql("CREATE DATABASE IF NOT EXISTS ref") holidays.write.format("parquet").saveAsTable("ref.holidays") holidays = spark.read.table("ref.holidays") holidays.limit(10).toPandas()
0.642432
0.987314
``` import numpy as np import matplotlib.pyplot as plt import pandas import biograph from biograph import graphplot from biograph import hmrf_em ``` # Create graph We call the `graph_generation_func` from [**🍒 Griottes 🍒**](https://gitlab.pasteur.fr/gronteix1/spheroid-graphs) to build the network representation of the tissue we wish to study from the extracted cell positions. Each cell has been categorized into one of several possible cell types (denoted by the `cell_type` variable). ``` example_prop_frame = pandas.read_csv('example_data.csv') example_prop_frame['cell_type'] = example_prop_frame.cell_type.values.astype(int) color_list = [plt.cm.Set3(i) for i in range(len(example_prop_frame.cell_type.unique()))] colors = [color_list[example_prop_frame.loc[i, 'cell_type']] for i in range(len(example_prop_frame))] example_prop_frame['color'] = colors from griottes.graphmaker import graph_generation_func descriptors = ['label', 'cell_type', 'color'] G = graph_generation_func.generate_voronoi_graph(example_prop_frame, cell_descriptors = descriptors, dCells = 50) ``` # Plotting the regions To plot large networks and to visualize the final tissue structure we wrote the `scatter_plot_2D` function. The individual connections aren't represented. - `G` (networkX graph object): Network to plot. Needs to contain a `color` attribute for each node, given as an RGB value. - `scatterpoint_size` (float, optional): size of each node - `legend` (bool, optional): show the legend - `figsize` (tuple, optional): figure size - `dim_to_squeeze` (str, optional): axis to project along. One of (`'x'`, `'y'`, `'z'`). `'z'` is the default ``` graphplot.scatter_plot_2D(G, scatterpoint_size = 8, legend = True, figsize = (8,8)) ``` # Initialize the classes to start the algorithm The XXX takes as input a networkX graph object, with the sole requirement that each node has an integer `cell_type` attribute. The initialization takes into account the following parameters: - `G` (networkX graph object): Network to regionalize. Has at least `cell_type` as an attribute. - `K` (int): the number of final regions in the tissue - `beta` (float): the strength of the region coupling. A small value leads to less homogeneous regions. - `max_it` (int): number of iterations ``` biograph = hmrf_em.hmrf(G, K = 4, beta = 10, max_it = 30) biograph.initiate_model() biograph.run() latent_G = biograph.graph graphplot.scatter_plot_2D(latent_G, scatterpoint_size = 8, legend = True, figsize = (8,8)) ```
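Before running the HMRF it can be useful to check the overall cell-type composition of the input graph. A minimal sketch, assuming `G` from above and the `cell_type` node attribute described in the text:
```
import collections
import networkx as nx

# Tally nodes per cell type using the 'cell_type' node attribute of G.
cell_types = nx.get_node_attributes(G, 'cell_type')
counts = collections.Counter(cell_types.values())
for cell_type, n in sorted(counts.items()):
    print(f"cell_type {cell_type}: {n} nodes")
```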
github_jupyter
import numpy as np import matplotlib.pyplot as plt import pandas import biograph from biograph import graphplot from biograph import hmrf_em example_prop_frame = pandas.read_csv('example_data.csv') example_prop_frame['cell_type'] = example_prop_frame.cell_type.values.astype(int) color_list = [plt.cm.Set3(i) for i in range(len(example_prop_frame.cell_type.unique()))] colors = [color_list[example_prop_frame.loc[i, 'cell_type']] for i in range(len(example_prop_frame))] example_prop_frame['color'] = colors from griottes.graphmaker import graph_generation_func descriptors = ['label', 'cell_type', 'color'] G = graph_generation_func.generate_voronoi_graph(example_prop_frame, cell_descriptors = descriptors, dCells = 50) graphplot.scatter_plot_2D(G, scatterpoint_size = 8, legend = True, figsize = (8,8)) biograph = hmrf_em.hmrf(G, K = 4, beta = 10, max_it = 30) biograph.initiate_model() biograph.run() latent_G = biograph.graph graphplot.scatter_plot_2D(latent_G, scatterpoint_size = 8, legend = True, figsize = (8,8))
0.341253
0.942348
# 8.3 Lab: Decision Trees ``` import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier, export_graphviz, DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error from sklearn import tree import graphviz import matplotlib.pyplot as plt %matplotlib inline ``` ## 8.3.1 Fitting Classification Trees The sklearn library has a lot of useful tools for trees. We first use classification trees to analyze the Carseats data set. In these data, Sales is a continuous variable, and so we begin by recoding it as a binary variable. We use the map() function to create a variable, called High, which takes on a value of 'Y' if the Sales variable exceeds 8, and takes on a value of 'N' otherwise. In Python, we also need to encode categorical variables as numeric values. ``` carseats = pd.read_csv('./data/Carseats.csv') carseats['High'] = carseats.Sales.map(lambda x: 'Y' if x>8 else 'N') carseats.ShelveLoc = pd.factorize(carseats.ShelveLoc)[0] carseats.Urban = carseats.Urban.map({'No':0, 'Yes':1}) carseats.US = carseats.US.map({'No':0, 'Yes':1}) carseats.info() ``` We first split the dataset into training (200 samples) and test sets. ``` X = carseats.drop(['Sales', 'High'], axis=1) y = carseats.High X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=200, random_state=0) ``` To build a tree, we could use 'gini' or 'entropy' as the split criterion at each node. Here I provide an example using 'gini'. If we change the hyperparameters, the classifier's score jumps around. ``` clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth=6, min_samples_leaf=4) clf_gini.fit(X_train, y_train) print(clf_gini.score(X_train, y_train)) ``` The most attractive feature of a tree is visualization. Here we first need to save the model into a .dot file and then use graphviz.Source to display it. ``` export_graphviz(clf_gini, out_file="mytree.dot", feature_names=X_train.columns) with open("mytree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) y_pred = clf_gini.predict(X_test) cm = pd.DataFrame(confusion_matrix(y_test, y_pred).T, index=['No', 'Yes'], columns=['No', 'Yes']) print(cm) print("Accuracy is ", accuracy_score(y_test,y_pred)*100) ``` The test accuracy of our model is significantly lower than our training result, which may indicate overfitting. We can go back and change the hyperparameters in the training process to reduce the dimension of the parameter space. ## 8.3.2 Fitting Regression Trees Here we fit a regression tree to the Boston data set. First, we create a training set, and fit the tree to the training data. Since pruning is not supported in this version of scikit-learn, let us fix max_depth at 2. ``` boston = pd.read_csv('./data/Boston.csv') X = boston.drop('medv', axis=1) y = boston.medv X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=0) regr_tree = DecisionTreeRegressor(max_depth=2) regr_tree.fit(X_train, y_train) export_graphviz(regr_tree, out_file="mytree.dot", feature_names=X_train.columns) with open("mytree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) y_pred = regr_tree.predict(X_test) mean_squared_error(y_test, y_pred) ``` ## 8.3.3 Bagging and Random Forests Here we apply bagging and random forests to the Boston data, using scikit-learn's RandomForestRegressor.
The exact results obtained in this section may depend on the version of Python and the version of scikit-learn installed on your computer. Recall that bagging is simply a special case of a random forest with m = p. Therefore, the RandomForestRegressor class can be used to perform both random forests and bagging. We perform bagging as follows: ``` all_features = X_train.shape[1] regr_bagging = RandomForestRegressor(max_features=all_features, random_state=1) regr_bagging.fit(X_train, y_train) y_pred = regr_bagging.predict(X_test) mean_squared_error(y_test, y_pred) ``` We can grow a random forest in exactly the same way, except that we'll use a smaller value of the max_features argument. Here we'll use max_features = 3 (close to the square root of 13). ``` regr_rf = RandomForestRegressor(max_features=3, random_state=1) regr_rf.fit(X_train, y_train) y_pred = regr_rf.predict(X_test) mean_squared_error(y_test, y_pred) ``` The test set MSE is even lower; this indicates that random forests yielded an improvement over bagging in this case. ``` Importance = pd.DataFrame({'Importance':regr_rf.feature_importances_*100}, index=X_train.columns) Importance.sort_values(by='Importance', axis=0, ascending=True).plot(kind='barh', color='r', ) plt.xlabel('Variable Importance') plt.gca().legend_ = None ``` ## 8.3.4 Boosting Here we use the GradientBoostingRegressor class. The argument n_estimators=500 indicates that we want 500 trees, and the option max_depth=4 limits the depth of each tree. ``` regr_boost = GradientBoostingRegressor(n_estimators=500, learning_rate=0.02, max_depth=4, random_state=1) regr_boost.fit(X_train, y_train) ``` Let us check the feature importance and MSE. ``` feature_importance = regr_boost.feature_importances_*100 rel_imp = pd.Series(feature_importance, index=X_train.columns).sort_values(inplace=False) rel_imp.T.plot(kind='barh', color='r', ) plt.xlabel('Variable Importance') plt.gca().legend_ = None y_pred = regr_boost.predict(X_test) mean_squared_error(y_test,y_pred) ```
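The regression-tree section above fixes `max_depth` because pruning is not used here; newer scikit-learn releases (0.22 and later) do expose cost-complexity pruning via `ccp_alpha`. A small sketch under that assumption, reusing the Boston `X_train`, `y_train`, `X_test`, `y_test` from above (selecting alpha on the test set is only for illustration; a proper choice would use cross-validation):
```
# Requires scikit-learn >= 0.22; reuses the Boston train/test split from above.
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

# Compute the sequence of effective alphas for minimal cost-complexity pruning.
path = DecisionTreeRegressor(random_state=0).cost_complexity_pruning_path(X_train, y_train)

best_alpha, best_mse = None, float("inf")
for alpha in path.ccp_alphas[:-1]:   # the last alpha prunes the tree down to a single node
    tree = DecisionTreeRegressor(random_state=0, ccp_alpha=alpha).fit(X_train, y_train)
    mse = mean_squared_error(y_test, tree.predict(X_test))
    if mse < best_mse:
        best_alpha, best_mse = alpha, mse

print(best_alpha, best_mse)
```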
github_jupyter
import numpy as np import pandas as pd from sklearn.cross_validation import train_test_split from sklearn.tree import DecisionTreeClassifier, export_graphviz, DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error from sklearn import tree import graphviz import matplotlib.pyplot as plt %matplotlib inline carseats = pd.read_csv('./data/Carseats.csv') carseats['High'] = carseats.Sales.map(lambda x: 'Y' if x>8 else 'N') carseats.ShelveLoc = pd.factorize(carseats.ShelveLoc)[0] carseats.Urban = carseats.Urban.map({'No':0, 'Yes':1}) carseats.US = carseats.US.map({'No':0, 'Yes':1}) carseats.info() X = carseats.drop(['Sales', 'High'], axis=1) y = carseats.High X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=200, random_state=0) clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth=6, min_samples_leaf=4) clf_gini.fit(X_train, y_train) print clf_gini.score(X_train, y_train) export_graphviz(clf_gini, out_file="mytree.dot", feature_names=X_train.columns) with open("mytree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) y_pred = clf_gini.predict(X_test) cm = pd.DataFrame(confusion_matrix(y_test, y_pred).T, index=['No', 'Yes'], columns=['No', 'Yes']) print(cm) print "Accuracy is ", accuracy_score(y_test,y_pred)*100 boston = pd.read_csv('./data/Boston.csv') X = boston.drop('medv', axis=1) y = boston.medv X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=0) regr_tree = DecisionTreeRegressor(max_depth=2) regr_tree.fit(X_train, y_train) export_graphviz(regr_tree, out_file="mytree.dot", feature_names=X_train.columns) with open("mytree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) y_pred = regr_tree.predict(X_test) mean_squared_error(y_test, y_pred) all_features = X_train.shape[1] regr_bagging = RandomForestRegressor(max_features=all_features, random_state=1) regr_bagging.fit(X_train, y_train) y_pred = regr_bagging.predict(X_test) mean_squared_error(y_test, y_pred) regr_rf = RandomForestRegressor(max_features=3, random_state=1) regr_rf.fit(X_train, y_train) y_pred = regr_rf.predict(X_test) mean_squared_error(y_test, y_pred) Importance = pd.DataFrame({'Importance':regr_rf.feature_importances_*100}, index=X_train.columns) Importance.sort_values(by='Importance', axis=0, ascending=True).plot(kind='barh', color='r', ) plt.xlabel('Variable Importance') plt.gca().legend_ = None regr_boost = GradientBoostingRegressor(n_estimators=500, learning_rate=0.02, max_depth=4, random_state=1) regr_boost.fit(X_train, y_train) feature_importance = regr_boost.feature_importances_*100 rel_imp = pd.Series(feature_importance, index=X_train.columns).sort_values(inplace=False) rel_imp.T.plot(kind='barh', color='r', ) plt.xlabel('Variable Importance') plt.gca().legend_ = None y_pred = regr_boost.predict(X_test) mean_squared_error(y_test,y_pred)
0.503174
0.942876
<div align="right">Peter Norvig, Nov. 2017</div> # Bad Grade, Good Experience Recently I was asked a question I hadn't thought about before: > *As a student, did you ever get a bad grade on a programming assignment?* I've forgotten most of my assignments, but there is one I do remember. It was something like this: # The Concordance Assignment > *Using the [`Snobol`](http://www.snobol4.org/) language, read lines of text from the standard input and print a *concordance*, which is an alphabetized list of words in the text, with the line number(s) where each word appears. Words with different capitalization (like "A" and "a") should be merged into one entry.* After studying Snobol a bit, I realized that the expected solution was along these lines: 1. Create an empty `dict` (Snobol calls these "tables") whose keys will be words and values will be lists of line numbers. 2. Read the lines of text (tracking the line numbers), split them into words, and build up the list of line numbers for each word. 3. Convert the table into a two-dimensional `array` where each row has the two columns `[word, line_numbers]`. 4. Write a function to sort the array alphabetically (`sort` is not built-in to Snobol). 5. Write a function to print the array. That would be around 40 to 60 lines of code; an easy task. But I noticed three interesting things about Snobol: * There is an *indirection* operator, `$`, so if the variable `'X'` has the value `"A"`, then `'$X = i'` is the same as `'A = i'`. * Uninitialized variables are treated as the empty string, so `'A += "text"'` works even if we haven't seen `'A'` before. * When the program ends, the Snobol interpreter automatically prints the values of every variable, sorted alphabetically, as a debugging aid. That means I could do away with the `dict` and `array` data structures, eliminating steps 1, 3, 4, and 5, and just do step 2! # The Concordance Solution I ended up with a program similar to the following (translated from Snobol to Python, but with `'$word'` indirection): ``` program = """ for i, line in enumerate(input): for word in re.findall(r"\w+", line.upper()): $word += str(i) + ', ' """ ``` That's just 3 lines, not 40 to 60! To test the program, I'll write a mock Snobol/Python interpreter, which at heart is just a call to the Python interpreter, `exec(program)`, except that it handles the three things I noticed about the Snobol interpreter: * `$word` gets translated as `_context[word]`. * It calls `exec(program, _context)`, where `_context` is a `defaultdict(str)`, so variables default to `''`. * After the `exec` completes, the user-defined variables (but not the built-in ones) are printed. 
``` from collections import defaultdict import re def snobol(program, data=''): """A Python interpreter with three Snobol-ish features: (1) $word indirection; (2) variables default to ''; (3) post-mortem dump.""" program = re.sub(r'\$(\w+)', r'_context[\1]', program) # (1) _context = defaultdict(str, vars(__builtins__)) # (2) _context.update(re=re, input=data.splitlines(), _context=_context) builtins = set(_context) try: exec(program, _context) finally: print('-' * 79) # (3) for name in sorted(_context): if not (name in builtins or name == '__builtins__'): print('{:10} = {}'.format(name, _context[name])) ``` Now we can run the program on some data: ``` data = """ There she was just a-walkin' down the street, Singin' "Do wah diddy diddy dum diddy do" Snappin' her fingers and shufflin' her feet, Singin' "Do wah diddy diddy dum diddy do" She looked good (looked good), she looked fine (looked fine) She looked good, she looked fine and I nearly lost my mind """ snobol(program, data) ``` Oops! The post-mortem printout includes the variables `i`, `line`, and `word`. Reluctantly, I increased the program's line count by 33%: ``` program = """ for i, line in enumerate(input): for word in re.findall(r"\w+", line.upper()): $word += str(i) + ', ' del i, line, word """ snobol(program, data) ``` Looks good to me! But sadly, the grader for the course did not agree, complaining that my program was not extensible: what if I wanted to cover two or more files in one run? What if I wanted the output to have a slightly different format? I argued that [YAGNI](https://en.wikipedia.org/wiki/You_aren%27t_gonna_need_it), and if the requirements changed, *then* I would write the necessary 40 or 60 lines, but there's no sense doing that until then. The grader was not impressed with my arguments and I got points taken off. Still, I was happy with my program. I felt like the purpose of the assignment was to get familiar with a new programming language with some different idioms/paradigms. By using the indirection operator I learned more about "thinking different" than if I had written the expected program. # TFW you flunk AI Here's another example that I had completely forgotten about until 2016, when I was cleaning out a filing cabinet and came across my old college transcript. It turns out that *I flunked an AI course!* (Or at least, didn't complete it.) This course was offered by Prof. Richard Millward in the Cognitive Science program. I certainly remember a lot of influential material from this class: we read David Marr, we read Winston's just-published *Psychology of Computer Vision*, we read a chapter from Duda and Hart which was then only a few years old. The things I learned in that course have stuck with me for decades, but one thing that didn't stick is that, according to my transcript, I never completed the course! I'm not sure what happened. I did an independent study with Ulf Grenander that semester; my best guess is that when I started doing the independent study that would have put me over some limit, and so I had to drop the AI course. So in both the concordance program and the Cognitive Science AI class, I had a great experience and I learned a lot, even if it wasn't well-reflected in official credit. The moral is: look for the good experiences, and don't worry about the official credit.
github_jupyter
program = """ for i, line in enumerate(input): for word in re.findall(r"\w+", line.upper()): $word += str(i) + ', ' """ from collections import defaultdict import re def snobol(program, data=''): """A Python interpreter with three Snobol-ish features: (1) $word indirection; (2) variables default to ''; (3) post-mortem dump.""" program = re.sub(r'\$(\w+)', r'_context[\1]', program) # (1) _context = defaultdict(str, vars(__builtins__)) # (2) _context.update(re=re, input=data.splitlines(), _context=_context) builtins = set(_context) try: exec(program, _context) finally: print('-' * 79) # (3) for name in sorted(_context): if not (name in builtins or name == '__builtins__'): print('{:10} = {}'.format(name, _context[name])) data = """ There she was just a-walkin' down the street, Singin' "Do wah diddy diddy dum diddy do" Snappin' her fingers and shufflin' her feet, Singin' "Do wah diddy diddy dum diddy do" She looked good (looked good), she looked fine (looked fine) She looked good, she looked fine and I nearly lost my mind """ snobol(program, data) program = """ for i, line in enumerate(input): for word in re.findall(r"\w+", line.upper()): $word += str(i) + ', ' del i, line, word """ snobol(program, data)
0.315947
0.875521
``` %matplotlib # %matplotlib inline import warnings warnings.filterwarnings('ignore') from params_ica import subject_ids, sessions from params_ica import main_path, data_path, preproc_pipeline_name from ipywidgets import widgets from IPython.display import display, clear_output, Javascript import mne from mne.io import read_raw_fif from mne.preprocessing import read_ica from mne.preprocessing import create_ecg_epochs, create_eog_epochs import numpy as np import getpass import os # Widget related imports from traitlets import Unicode # nbconvert related imports from nbconvert import get_export_names, export_by_name from nbconvert.writers import FilesWriter from nbformat import read, NO_CONVERT from nbconvert.utils.exceptions import ConversionException warnings.filterwarnings('ignore') ``` ## Choose subject ID: ``` name_sel = widgets.Select( description='Subject ID:', options=subject_ids ) display(name_sel) cond_sel = widgets.RadioButtons( description='Condition:', options=sessions, ) display(cond_sel) %%capture if cond_sel.value == sessions[0]: session = sessions[0] elif cond_sel.value == sessions[1]: session = sessions[1] subj_ID = name_sel.value data_path = os.path.join(main_path, subj_ID) pipeline_path = os.path.join(main_path, preproc_pipeline_name) sbj_data_path = os.path.join(main_path, subj_ID, session, 'meg') basename = subj_ID + '_task-rest_run-01_meg_raw_filt_dsamp' results_folder = os.path.join('preproc_meeg', '_sess_index_' + session + '_subject_id_' + subj_ID) raw_fname = basename + '.fif' ica_fname = basename + '_ica.fif' ica_TS_fname = basename + '_ica-tseries.fif' report_fname = basename + '-report.html' ica_solution_fname = basename + '_ica_solution.fif' raw_file = os.path.join(pipeline_path, results_folder, 'preproc', raw_fname) # filtered data raw_ica_file = os.path.join(pipeline_path, results_folder, 'ica', ica_fname) # cleaned data new_raw_ica_file = os.path.join(sbj_data_path, ica_fname) # path where to save the cleaned data after inspection ica_TS_file = os.path.join(pipeline_path, results_folder, 'ica', ica_TS_fname) ica_solution_file = os.path.join(pipeline_path, results_folder, 'ica', ica_solution_fname) report_file = os.path.join(pipeline_path, results_folder, 'ica', report_fname) ``` ## Load data ``` # Load data -> we load the filtered data to see the TS of all ICs print('Load raw file -> {} \n\n'.format(raw_file)) raw = read_raw_fif(raw_file, preload=True) ica = read_ica(ica_solution_file) ica.labels_ = dict() ica_TS = ica.get_sources(raw) ``` ## Cell below opens an html report in a web-browser ``` %%bash -s "$report_file" firefox -new-window $1 ica.exclude ica.plot_sources(raw) ica.plot_components(inst=raw) # ica.exclude if ica.exclude: ica.plot_properties(raw, picks=ica.exclude) ``` ## Exclude ICA components To exclude/include an ICA component **click on mne_browse window**: the **red** ones will be excluded. To keep the new excluded ICA components CLOSE the mne_browe window! ## Apply ica solution to raw data and save the result Check in the next cells if you are excluding the components you want!!! 
You can choose to save the cleaned raw file either in the ica folder of the workflow dir (**raw_ica_file**) or in the subject folder (**new_raw_ica_file**). ``` print('You want to exclude the following components: *** {} ***'.format(ica.exclude)) %%capture ica.apply(raw) raw.save(raw_ica_file, overwrite=True) # save in workflow dir # raw.save(new_raw_ica_file, overwrite=True) # save in subject dir ica.save(ica_solution_file) print('You REMOVED the following components: *** {} *** \n'.format(ica.exclude)) print('You SAVED the new CLEANED file here: *** {} ***'.format(raw_ica_file)) ```
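Besides the visual inspection described above, MNE can also suggest artifact-related components automatically. A minimal sketch, assuming `raw` and `ica` as loaded earlier; it needs usable ECG/EOG signals in the recording (for example, `find_bads_eog` requires EOG channels), so results may vary:
```
# Automatic suggestions for artifact-related components (assumes raw and ica from above).
ecg_indices, ecg_scores = ica.find_bads_ecg(raw)
eog_indices, eog_scores = ica.find_bads_eog(raw)

print('Suggested ECG-related components:', ecg_indices)
print('Suggested EOG-related components:', eog_indices)

# One could merge these suggestions into ica.exclude before applying the solution,
# but the final decision should still follow the visual check above:
# ica.exclude = sorted(set(ica.exclude) | set(ecg_indices) | set(eog_indices))
```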
github_jupyter
%matplotlib # %matplotlib inline import warnings warnings.filterwarnings('ignore') from params_ica import subject_ids, sessions from params_ica import main_path, data_path, preproc_pipeline_name from ipywidgets import widgets from IPython.display import display, clear_output, Javascript import mne from mne.io import read_raw_fif from mne.preprocessing import read_ica from mne.preprocessing import create_ecg_epochs, create_eog_epochs import numpy as np import getpass import os # Widget related imports from traitlets import Unicode # nbconvert related imports from nbconvert import get_export_names, export_by_name from nbconvert.writers import FilesWriter from nbformat import read, NO_CONVERT from nbconvert.utils.exceptions import ConversionException warnings.filterwarnings('ignore') name_sel = widgets.Select( description='Subject ID:', options=subject_ids ) display(name_sel) cond_sel = widgets.RadioButtons( description='Condition:', options=sessions, ) display(cond_sel) %%capture if cond_sel.value == sessions[0]: session = sessions[0] elif cond_sel.value == sessions[1]: session = sessions[1] subj_ID = name_sel.value data_path = os.path.join(main_path, subj_ID) pipeline_path = os.path.join(main_path, preproc_pipeline_name) sbj_data_path = os.path.join(main_path, subj_ID, session, 'meg') basename = subj_ID + '_task-rest_run-01_meg_raw_filt_dsamp' results_folder = os.path.join('preproc_meeg', '_sess_index_' + session + '_subject_id_' + subj_ID) raw_fname = basename + '.fif' ica_fname = basename + '_ica.fif' ica_TS_fname = basename + '_ica-tseries.fif' report_fname = basename + '-report.html' ica_solution_fname = basename + '_ica_solution.fif' raw_file = os.path.join(pipeline_path, results_folder, 'preproc', raw_fname) # filtered data raw_ica_file = os.path.join(pipeline_path, results_folder, 'ica', ica_fname) # cleaned data new_raw_ica_file = os.path.join(sbj_data_path, ica_fname) # path where to save the cleaned data after inspection ica_TS_file = os.path.join(pipeline_path, results_folder, 'ica', ica_TS_fname) ica_solution_file = os.path.join(pipeline_path, results_folder, 'ica', ica_solution_fname) report_file = os.path.join(pipeline_path, results_folder, 'ica', report_fname) # Load data -> we load the filtered data to see the TS of all ICs print('Load raw file -> {} \n\n'.format(raw_file)) raw = read_raw_fif(raw_file, preload=True) ica = read_ica(ica_solution_file) ica.labels_ = dict() ica_TS = ica.get_sources(raw) %%bash -s "$report_file" firefox -new-window $1 ica.exclude ica.plot_sources(raw) ica.plot_components(inst=raw) # ica.exclude if ica.exclude: ica.plot_properties(raw, picks=ica.exclude) print('You want to exclude the following components: *** {} ***'.format(ica.exclude)) %%capture ica.apply(raw) raw.save(raw_ica_file, overwrite=True) # save in workflow dir # raw.save(new_raw_ica_file, overwrite=True) # save in subject dir ica.save(ica_solution_file) print('You REMOVED the following components: *** {} *** \n'.format(ica.exclude)) print('You SAVED the new CLEANED file here: *** {} ***'.format(raw_ica_file))
0.384797
0.641829
``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns plt.rcParams = plt.rcParamsOrig ``` # Direct Sampling Out of 30 people you showed your app's new UI to, 22 said they liked it. Is this just a coincidence? 📱 ## Classical Method $$ P(X \ge k) = 1 - \sum_{c=0}^{k-1} \binom{n}{c} p^c (1-p)^{n-c} $$ ``` import scipy.stats as ss x = np.arange(31) y = ss.binom(30, 0.5).pmf(x) plt.bar(x, y) plt.axvline(22, c='tab:red') plt.annotate("", xy=(24, 0.08), xytext=(22, 0.08), arrowprops=dict(arrowstyle="->", color='tab:red')) plt.xlabel('$k$') plt.ylabel('$P(X=k)$'); ``` ## Sampling Method ``` # Your code here ``` # Shuffling Given the data below on the allocation of the old and new fertilizer together with the resulting harvests, does the new fertilizer lead to a larger harvest? 🍅 ``` x1 = np.array([29.2, 11.4, 25.3, 16.5, 21.1]) # 20.70 x2 = np.array([26.6, 23.7, 28.5, 14.2, 17.9, 24.3]) # 22.53 n1 = len(x1) n2 = len(x2) ``` ## Classical Method $$ t = \frac{\bar X_1 - \bar X_2}{\sqrt{\frac{s_1^2}{n_1} + \frac{s_2^2}{n_2}}} $$ ``` t = np.round( (x2.mean() - x1.mean()) / np.sqrt(np.var(x1, ddof=1)/n1 + np.var(x2, ddof=1)/n2), 3 ) ``` $$ \nu \approx \frac{\left(\frac{s_1^2}{N_1} + \frac{s_2^2}{N_2}\right)^2}{\frac{s_1^4}{N_1^2(N_1-1)} + \frac{s_2^4}{N_2^2(N_2-1)}} $$ ``` num = (np.var(x1, ddof=1)/n1 + np.var(x2, ddof=1)/n2) ** 2 denom = np.var(x1, ddof=1)**2/(n1**2 * (n1 - 1)) + np.var(x2, ddof=1)**2/(n2**2 * (n2 - 1)) nu = num / denom ss.t(nu).ppf(1 - 0.05) ``` ## Engineer's Method ``` from statsmodels.stats.weightstats import ttest_ind t, p, dof = ttest_ind( x2, x1, alternative='larger', usevar='unequal' ) p ``` ## Sampling Method ``` np.random.seed(42) x = np.array([29.2, 11.4, 26.6, 23.7, 25.3, 28.5, 14.2, 17.9, 16.5, 21.1, 24.3]) diff = [] # Your code here sns.histplot(x=diff, bins=30, element='step', fill=False) plt.axvline(1.83, c='tab:red') plt.annotate("", xy=(3, 400), xytext=(1.83, 400), arrowprops=dict(arrowstyle="->", color='tab:red')) plt.xlabel('$X$') plt.ylabel('freq'); ``` # Bootstrapping An online motorcycle-taxi (ojek) driver received orders every day for 21 days as shown below. How confident can he be about his average number of orders per day?
🛵 ``` np.random.seed(42) x = np.random.poisson(20, size=(3, 7)) sns.heatmap( x, square=True, cbar=False, annot=True, fmt='d', cmap='Greens' ) plt.xticks([]) plt.yticks([]); ``` ## Classical Method $$ \bar{X} = \frac{1}{N} \sum_{i=1}^N x_i $$ $$ \sigma_{\bar{x}} = \frac{1}{\sqrt{N}} \sqrt{\frac{1}{N-1} \sum_{i=1}^N (x_i - \bar{x})^2} $$ ``` # Your code here ``` ## Sampling Method ``` from matplotlib.animation import FuncAnimation from IPython.display import HTML plt.rcParams['animation.html'] = 'html5' fig, ax = plt.subplots() def update(frame): bg = np.array([0] * 20 + [1]) np.random.shuffle(bg) bg = bg.reshape(3, 7) ax.imshow(bg) for i in range(x.shape[0]): for j in range(x.shape[1]): c = 'k' if bg[i, j] == 1 else 'w' plt.text(j-0.1, i+0.1, x[i,j], c=c) plt.axis('off') return ax anim = FuncAnimation(fig, update, frames=21, interval=500) # anim.save('ojek.gif', writer='imagemagick', fps=21) anim; n_trials = 10_000 data = [] # Your code here sns.histplot(x=data, bins=30) plt.xlabel('$\\bar x$') plt.title(f"order = {data.mean():.2f} $\pm$ {data.std():.2f}") plt.savefig('ojek-hist.png', bbox_inches='tight') ``` # Bootstrapped Linear Regression ``` np.random.seed(1) y = x.flatten() hour = y / (1.75) + np.random.normal(scale=0.7, size=21) fig, ax = plt.subplots(figsize=(7, 7)) sns.regplot( x=hour, y=y, ci=False, ax=ax ) plt.xlabel('jam kerja') plt.ylabel('order'); n_trials = 10_000 models = [] for _ in range(n_trials): i = np.random.randint(21, size=21) reg = ss.linregress(hour[i], y[i]) models.append((reg.intercept, reg.slope)) sns.jointplot( x='slope', y='intercept', data=pd.DataFrame(models, columns=['intercept', 'slope']), kind='hex' ); fig, ax = plt.subplots(figsize=(7, 7)) intercepts, slopes = np.array(models).T yhat = np.outer(intercepts, np.ones(21)) + np.outer(slopes, hour) sns.regplot( x=hour, y=y, ci=False, ax=ax ) plt.fill_between( hour, yhat.mean(axis=0) - yhat.std(axis=0), yhat.mean(axis=0) + yhat.std(axis=0), alpha=0.5, color='tab:orange' ); ``` # Cross Validation What was the trend in the growth of daily COVID-19 case counts in Indonesia over the first 100 days? 🦠 ``` # Source: Kawal COVID-19 # https://kawalcovid19.id/ df = pd.read_csv('https://docs.google.com/spreadsheets/d/1ma1T9hWbec1pXlwZ89WakRk-OfVUQZsOCFl4FwZxzVw/export?format=csv&gid=387345074') data = df['Kasus harian'].str.replace(',', '').astype(int).reset_index() train_data = data.head(100).copy() sns.regplot( x='index', y='Kasus harian', ci=None, data=train_data, fit_reg=True, marker='.', order=1, # polynomial order line_kws=dict( color='tab:red', alpha=0.5 ) ) plt.xlabel('hari ke-i') plt.ylabel('jumlah kasus'); from sklearn.metrics import mean_squared_error def regress(x, y, degree): p = np.polyfit(x, y, degree) reg = np.poly1d(p) yhat = reg(x) return mean_squared_error(y, yhat) mses = [] degrees = range(1, 15) for degree in degrees: mses.append( regress(train_data['index'], train_data['Kasus harian'], degree) ) plt.plot(degrees, np.sqrt(mses)) plt.xlabel('orde polinomial') plt.ylabel('RMSE'); ``` ## Sampling Method ``` train_data['label'] = np.random.randint(0, 2, size=len(train_data)) mask = train_data['label'] == 1 plt.scatter( x='index', y='Kasus harian', data=train_data, c=np.array(['tab:blue', 'tab:red'])[train_data.label], marker='.'
) plt.xlabel('hari ke-i') plt.ylabel('jumlah kasus'); fig, ax = plt.subplots(sharex=True, sharey=True, ncols=2) configs = dict( x='index', y='Kasus harian', ci=None, fit_reg=True, scatter=False, marker='.', order=2 ) sns.regplot( data=train_data[mask], color='tab:blue', ax=ax[0], **configs ) sns.regplot( data=train_data[~mask], color='tab:red', ax=ax[1], **configs ) new_configs = dict( x='index', y='Kasus harian', ci=None, fit_reg=False, scatter=True, marker='.' ) sns.regplot( data=train_data[~mask], color='tab:red', ax=ax[0], **new_configs ) sns.regplot( data=train_data[mask], color='tab:blue', ax=ax[1], **new_configs ) ax[0].set_xlabel('hari ke-i') ax[0].set_ylabel('jumlah kasus') ax[1].set_xlabel('hari ke-i') ax[1].set_ylabel(''); p = np.polyfit(train_data[~mask]['index'], train_data[~mask]['Kasus harian'], 2) reg = np.poly1d(p) y_true = train_data[mask]['Kasus harian'] y_pred = reg(train_data[mask]['index']) np.sqrt(mean_squared_error(y_true, y_pred)) def cross_validate(x, y, degree: int, n_fold: int, metric) -> np.ndarray: # Your code here pass np.random.seed(42) mses = [] emses = [] degrees = range(1, 15) for degree in degrees: mses.append( regress(train_data['index'], train_data['Kasus harian'], degree) ) emses.append( cross_validate( train_data['index'], train_data['Kasus harian'], degree=degree, n_fold=2, metric=mean_squared_error ).mean() ) plt.plot(degrees, np.sqrt(mses)) plt.plot(degrees, np.sqrt(emses)) plt.xlabel('orde polinomial') plt.ylabel('RMSE') plt.legend(['RMSE', 'CV-RMSE']); p = np.polyfit(train_data['index'], train_data['Kasus harian'], 7) reg = np.poly1d(p) ax = plt.gca() data['Kasus harian'].plot.line(label='aktual', ax=ax) ax.plot(reg(data['index']), label='prediksi') axins = ax.inset_axes([0.1, 0.2, 0.5, 0.5]) data['Kasus harian'].head(200).plot.line(label='aktual', ax=axins) axins.plot(reg(data['index'].head(200)), label='prediksi') x1, x2, y1, y2 = 0, 200, 0, 4000 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') ax.indicate_inset_zoom(axins, edgecolor="black") plt.legend(['aktual', 'prediksi']); ``` # Bonus A music lover is shuffle-playing the soundtrack of the latest Charlie's Angels movie. The album has 11 songs, 6 of which are Ariana Grande songs. What is the probability of getting an Ariana Grande song three times in a row? (Source: [Twitter](https://twitter.com/waribowo_/status/1196722307722444802))
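One possible way to fill in the first "Your code here" cell (the Direct Sampling exercise at the top of this notebook) is a simple simulation under a fair-coin null hypothesis; a sketch, with the number of trials chosen arbitrarily:
```
import numpy as np

# Direct sampling estimate of P(X >= 22) for X ~ Binomial(30, 0.5).
rng = np.random.default_rng(42)
n_trials = 100_000

samples = rng.binomial(n=30, p=0.5, size=n_trials)
p_value = np.mean(samples >= 22)
print(f"Estimated P(X >= 22) ~ {p_value:.4f}")  # should be close to the exact tail, about 0.008
```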
<center> <h1>Herramientas Informáticas<br></br>para la Investigación Interdisciplinaria</h1> </center> <br></br> * Profesor: <a href="http://www.pucp.edu.pe/profesor/jose-manuel-magallanes/" target="_blank">Dr. José Manuel Magallanes, PhD</a> ([[email protected]](mailto:[email protected]))<br> * Profesor del **Departamento de Ciencias Sociales, Pontificia Universidad Católica del Peru**. * Senior Data Scientist del **eScience Institute** and Visiting Professor at **Evans School of Public Policy and Governance, University of Washington**. * Fellow Catalyst, **Berkeley Initiative for Transparency in Social Sciences, UC Berkeley**. * Research Associate, **Center for Social Complexity, George Mason University**. <a id='beginning'></a> ## Sesión 3: Pre Procesamiento de Datos Vamos a realizar dos procesos en esta etapa de pre-procesamiento: * [Limpieza](#limpieza) * [Formato](#formato) Cuando hablamos de limpieza nos referiremos a verificar que la data haya sido leída adecuadamente, y que no estén presentes caracteres extraños que "desorienten" a los cálculos posteriores. Cuando hablamos de formato, nos referimos a que los datos limpios representen adecuadamente los valores o estructuras que el tratamiento metodológico posterior requiere. Como ves, usamos Jupyter, pues nos permite ver *lo que está pasando* con los datos, de mejor manera de lo que ofrece RStudio. <a id='limpieza'></a> ## Parte A: Limpieza de Data El pre procesamiento de datos es la parte más tediosa del proceso de investigación. Esta primera parte delata diversos problemas que se tienen con los datos reales que están en la web, como la que vemos a continuación: ``` import IPython linkIndexes="https://en.wikipedia.org/wiki/List_of_freedom_indices" weblinkIndexes = '<iframe src=' + linkIndexes + ' width=700 height=350></iframe>' IPython.display.HTML(weblinkIndexes) ``` Recuerda inspeccionar la tabla para encontrar algun atributo que sirva para su descarga. De ahí, continúa. Para trabajar con tablas, necesitaremos la ayuda de **Pandas**. Verifica qué versión de Pandas tienes: ``` # si obtienes error es por que no lo has instalado import pandas as pd pd.__version__ ``` Si la versión es 23, continúa, sino, actualizalo. ``` # antes instala'beautifulsoup4' # es posible que necesites salir y volver a cargar notebook wikiTables=pd.read_html(linkIndexes, header=0,#titulos están en primera fila: Python cuenta desde '0' flavor='bs4', #socio para rescatar texto de html attrs={'class': 'wikitable sortable'})#atributo buscado ``` La función *read_html* ha traido las **wikitablas** que hay en esa página de Wikipedia. Veamos cuantas tablas hay: ``` # cuantas tablas tenemos? len(wikiTables) ``` Es importante saber qué estructura se ha utilizado para almacenar las tablas traidas, aunque sólo haya sido una: ``` # las tenemos en: type(wikiTables) ``` Entonces, nuestro tabla (o *dataframe*) será el primer elemento de esa lista: ``` type(wikiTables[0]) ``` De ahi que, para tener la tabla: ``` DF=wikiTables[0] #primera mirada DF.head() ``` La limpieza requiere estrategia. Lo primero que salta a la vista, son los _footnotes_ que están en los títulos: ``` DF.columns ``` Podrias intentar poner nombres nuevos y alterar los anteriores, pero pensemos en una estrategia donde tendrías muchas columnas. En ese caso, es mejor eliminar los errores sin importar cuantas columnas hay: ``` import re # debe estar instalado. 
# encuentra uno o más espacios: \\s+ # encuentra uno o mas numeros \\d+ # encuentra un bracket que abre \\[ # encuentra un bracket que cierra \\] pattern='\\s+|\\d+|\\[|\\]' # cuando alguno de estos aparezca replacer='' # reemplazalo por esto ``` Ya tengo nuevos titulos de columna (headers). Ahora creo nuevos nombres: ``` newHeaders=[re.sub(pattern,replacer,element) for element in DF.columns] ``` Preparemos los cambios. Hay que preparar los *matches* entre lo antiguo y lo nuevo. Usemos el comando *zip*: ``` list(zip(DF.columns,newHeaders)) # tenemos que crear un 'diccionario' usando la anterior: {old:new for old,new in zip(DF.columns,newHeaders)} ``` El *dict* tiene lo que necesito. Eso lo uso en la función *rename* de Pandas: ``` changeMatch={old:new for old,new in zip(DF.columns,newHeaders)} DF.rename(columns=changeMatch,inplace=True) # ahora tenemos: DF.head() ``` Los contenidos de las celdas son texto, veamos si todas se han escrito de la manera correcta: ``` DF.FreedomintheWorld.value_counts() DF.IndexofEconomicFreedom.value_counts() DF.PressFreedomIndex.value_counts() DF.DemocracyIndex.value_counts() ``` No hay problema con los contenidos. [Ir a inicio](#beginning) ____ <a id='formato'></a> ## Parte B: Formateando Valores en Python ### Las escalas de medición Para saber si están en la escala correcta, debemos usar _dtypes_: ``` DF.dtypes ``` Los cuatro indices son categorías, no texto (_object_). Hagamos la conversión: ``` headers=DF.columns # guardando los nombres de todas las columnas # cambiar desde la segunda columna en adelante '[1:]': DF[headers[1:]]=DF[headers[1:]].astype('category') # obtenemos: DF.dtypes ``` Este cambio es imperceptible a la vista: ``` DF.head() ``` Mientras no sean variables categóricas no podemos utilizar las funciones que tiene Pandas para esas variables. Por ejemplo, pidamos los modalidades: ``` DF.FreedomintheWorld.cat.categories DF.IndexofEconomicFreedom.cat.categories DF.PressFreedomIndex.cat.categories DF.DemocracyIndex.cat.categories ``` Vemos que tenemos hasta 5 niveles en 2 variables, y 3 y 4 niveles en otras. De ahi que lo prudente es encontrar la distribución común de valores que refleja la ordinalidad, y los máximos y mínimos. Veamos los pasos iniciales: ``` #guardando en una lista las modalidades de la variable Freedom in the world: oldWorld=list(DF.FreedomintheWorld.cat.categories) # que es: oldWorld # usando palabras que representen la ordinalidad, # pero que puedan ser usadas en las otras variables # DEBEN crearse en el mismo orden que 'oldWorld' newWorld=['very good','very bad','middle'] # cambiar match entre lo antiguo por lo nuevo: recodeWorld={old:new for old,new in zip (oldWorld,newWorld)} recodeWorld ``` Con el dict *recodeWorld* puedo renombrar luego las categorías. 
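A minimal aside, assuming the `DF` and `recodeWorld` objects built above: recent pandas releases deprecate the `inplace=True` form of `cat.rename_categories` used further below (the argument is removed in pandas 2.0), so the same renaming step can also be written as a plain reassignment.

```
# Assignment-based equivalent of the category renaming applied later on;
# it assumes DF and recodeWorld already exist exactly as built above.
DF['FreedomintheWorld'] = DF['FreedomintheWorld'].cat.rename_categories(recodeWorld)
```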
Preparemos ahora los dicts para las otras variables: ``` oldEco=list(DF.IndexofEconomicFreedom.cat.categories) newEco=['very good','middle','good','bad','very bad'] recodeEco={old:new for old,new in zip (oldEco,newEco)} oldPress=list(DF.PressFreedomIndex.cat.categories) newPress=['bad','very good','middle','good','very bad'] recodePress={old:new for old,new in zip (oldPress,newPress)} oldDemo=list(DF.DemocracyIndex.cat.categories) newDemo=['very bad','good','very good','bad'] recodeDemo={old:new for old,new in zip (oldDemo,newDemo)} ``` Ahora usamos los dicts creados para recodificar: ``` DF.FreedomintheWorld.cat.rename_categories(recodeWorld,inplace=True) DF.IndexofEconomicFreedom.cat.rename_categories(recodeEco,inplace=True) DF.PressFreedomIndex.cat.rename_categories(recodePress,inplace=True) DF.DemocracyIndex.cat.rename_categories(recodeDemo,inplace=True) ``` Veamos como quedó: ``` DF.head() ``` Los datos aun no son ordinales, pero aqui serán: ``` # creemos la secuencia: from pandas.api.types import CategoricalDtype sequence=['very good','good','middle','bad','very bad'] ordinal = CategoricalDtype(categories=sequence, ordered=True) #aquí está la secuencia pero con propiedades ordinal # apliquemos la secuencia con sus propiedades a la data: DF[headers[1:]]=DF[headers[1:]].astype(ordinal) # asi va: DF.head() ``` Notemos que las modalidades no usadas están presentes: ``` DF.FreedomintheWorld.value_counts(sort=False,dropna=False) ``` Verificaciones adicionales: ``` #las categorias: DF.PressFreedomIndex.cat.categories #tipo de escala? DF.PressFreedomIndex.cat.ordered ``` [Ir a inicio](#beginning) ____ <a id='monotony'></a> ### Cambio de Monotonía: Verifiquemos si está bien la asignación que hemos hecho: ``` DF.PressFreedomIndex.head() DF.PressFreedomIndex.max() ``` Este es un caso donde quiza la intensidad creciente debe ser hacia el sentido positivo del concepto. Claro que pudimos hacerlo al inicio, pero aprovechemos para saber cómo se hace. Para ello crearé una función: ``` # la función recibe una columna: def changeMonotony(aColumn): # Invierto las categorias de la columna: newOrder= aColumn.cat.categories[::-1] # [::-1] reverses the list. # se retorna columa con modalidades reordenadas: return aColumn.cat.reorder_categories(newOrder,ordered=True) ``` Esta función la aplica de nuevo, columna por columna: ``` DF[headers[1:]]=DF[headers[1:]].apply(changeMonotony) ``` ¿Funcionó? ``` DF.PressFreedomIndex.head() DF.PressFreedomIndex.max() ``` Tenemos aun valores perdidos, por lo que podríamos convertirlos las categorías en números para realizar alguno calculos para una imputación simple: ``` oldlevels=['very bad','bad','middle','good','very good'] newlevels=[1,2,3,4,5] recodeMatch={old:new for old,new in zip (oldlevels,newlevels)} renamer=lambda column: column.cat.rename_categories(recodeMatch) DF[headers[1:]]=DF[headers[1:]].apply(renamer) DF.head(10) ``` La función para reemplazarlos es sencilla, pero hay que evitar facilismos. 
Veamos: ``` #recordar: DF.dtypes #tienen que ser numericos: DF[headers[1:]]=DF[headers[1:]].astype(dtype='float',errors='ignore') # ahora: DF.dtypes DF.head(10) ``` Veamos qué variables tienen menos valores perdidos: ``` # sumo los perdidos en cada una: DF.isnull().sum() ``` Como la *FreedomintheWorld* es quien tiene menos perdidos, debo calcular la mediana de cada variable, segun el nivel de *FreedomintheWorld*: ``` #mediana por grupos: DF.groupby(headers[1])[headers[2:]].median() ``` Lo que veo tiene sentido, entonces lo lógico sería que la mediana de cada uno de estos subgrupos reemplace a los perdidos de cada subgrupo. Osea: ``` # mas facil: for col in headers[2:]: DF[col].fillna(DF.groupby(["FreedomintheWorld"])[col].transform("median"), inplace=True) ``` Obteniendo: ``` DF.head(10) ``` Si pensamos trabajar en R, recordemos que si grabamos un archivo en CSV, no podremos pasarle al R etiquetas. En todo caso, podríamos tener un grupo de columnas que funcionen como etiquetas. Creemos una copia: ``` DF2=DF.copy() ``` En esa copia ponemos las etiquetas, por lo que primero convertimos los numeros a categoría: ``` DF2[headers[1:]]=DF2[headers[1:]].astype('category') # podemos poder ordinal, pero se perdería la info ``` Aun no veremos mayor cambio, pero estos ya son ordinales: ``` DF2.head() ``` Aquí las recodificamos: ``` # mapa de recodificacion newlevels2=['1 very bad','2 bad','3 middle','4 good','5 very good'] oldlevels2=[1,2,3,4,5] recodeMatch2={old:new for old,new in zip (oldlevels2,newlevels2)} # aplicando función de recodificacion renamer=lambda column: column.cat.rename_categories(recodeMatch2) DF2[headers[1:]]=DF2[headers[1:]].apply(renamer) DF2.head(10) ``` Podríamos poner titulares más sencillo a esta data: ``` DF2.columns=["Country","WorldFreedom","EconomicFreedom","PressFreedom","Democracy"] ``` Ahora concatenamos lo creado al original: ``` frames=[DF,DF2.iloc[:,1:]] DF=pd.concat(frames,axis=1) ``` Podríamos añadir una variable de tipo numérico a nuestros datos: ``` gdpLink="https://en.wikipedia.org/wiki/List_of_countries_by_GDP_(PPP)_per_capita" gdpTables=pd.read_html(gdpLink,header=0, flavor='bs4', attrs={'class': 'wikitable sortable'}) # cuantas tenemos: len(gdpTables) # selecciones la tercera: gdpTables[2].head() # quedemosnos con la segunda y tercera fila DFgdp=gdpTables[2].iloc[:,1:3] # confirmemos que los tipos son adecuados: DFgdp.dtypes # cambiemos esos nombres: DFgdp.columns=["Country","gdp"] DFgdp.head() #comparemos tamaños: DF.shape, DFgdp.shape # consultemos que saldrá al hacer el merge: DFgdp.merge(DF,on="Country").shape DFtotal=DFgdp.merge(DF,on="Country") # como quedó: DFtotal.head() # quedemonos con las filas con datos completos, ya no imputemos: DFtotal.dropna(inplace=True) ``` **Guardando archivo** A esta altura es bueno guardar el archivo, pues ya está listo: ``` #DFtotal.to_csv("indexes.csv",index=None) ``` ____ * [Ir a inicio](#beginning) * [Menú Principal](https://reproducibilidad.github.io/TallerChile/) _____ **AUSPICIO**: El desarrollo de estos contenidos ha sido posible gracias al grant del Berkeley Initiative for Transparency in the Social Sciences (BITSS) at the Center for Effective Global Action (CEGA) at the University of California, Berkeley <center> <img src="https://github.com/MAGALLANESJoseManuel/BITSS_ToolsWorkshop/raw/master/LogoBitss.jpg" style="width: 300px;"/> </center> **RECONOCIMIENTO** <!-- EL Dr. Magallanes agradece a la Pontificia Universidad Católica del Perú, por su apoyo en la elaboración de este trabajo. 
<center> <img src="https://github.com/MAGALLANESJoseManuel/BITSS_ToolsWorkshop/raw/master/LogoPUCP.jpg" style="width: 200px;"/> </center> --> El autor reconoce el apoyo que el eScience Institute de la Universidad de Washington le ha brindado desde el 2015 para desarrollar su investigación en Ciencia de Datos. <center> <img src="https://github.com/MAGALLANESJoseManuel/BITSS_ToolsWorkshop/raw/master/LogoES.png" style="width: 300px;"/> </center> <br> <br>
``` import os import sys import matplotlib.pyplot as plt import IPython.display as ipd import pandas as pd import re import subprocess import numpy as np import math %load_ext autoreload %autoreload 2 %matplotlib inline sys.path.append('../audioset_tagging_cnn/') sys.path.append('../src') import logging import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data from pytorch.models import * from utils.notebooks_utils import * data_root = '/media/maxim/SStorage/FG_2020/' reduced_sample_rate = 'data/Reduced_sample_rate' reduced_sample_rate_path = os.path.join(data_root, reduced_sample_rate) separated_audio = 'data/Separated_audio' separated_audio_path = os.path.join(data_root, separated_audio) labels = 'labels/downgraded' labels_path = os.path.join(data_root, labels) features = 'features' log_root = '/media/maxim/SStorage/FG_2020/logs/' tb_log_root = '/media/maxim/SStorage/FG_2020/logs/tb/' features_type = 'mel_64x32' batch_size = 128 class_names = ['Neutral', 'Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise'] def apply_window(features, labels, window_width): features = np.transpose(features) ratio = int(len(features) / len(labels)) x_center = int((window_width - 1) / 2) res_x = [] res_y = [] for idx, lab in enumerate(labels): x_arr = None if idx < x_center: x_arr = np.concatenate((np.flip(features[(idx + 1) * ratio: (idx + x_center + 1) * ratio]), features[idx * ratio: (idx + 1) * ratio], features[(idx + 1) * ratio: (idx + x_center + 1) * ratio]), axis=0) x_arr = x_arr[0:window_width * x_center * ratio] elif len(features) < (idx + x_center + 1) * ratio: x_arr = np.concatenate((features[(idx - x_center) * ratio: idx * ratio], features[idx * ratio: (idx + 1) * ratio], np.flip(features[(idx - x_center) * ratio: idx * ratio])), axis=0) x_arr = x_arr[0:window_width * x_center * ratio] else: x_arr = features[(idx - x_center) * ratio: (idx + x_center + 1) * ratio] res_x.append(np.transpose(x_arr)) res_y.append(lab) return (np.asarray(res_x), np.asarray(res_y)) def expand_array(x, max_len): return np.pad(x, [(0, 0), (0, max_len - x.shape[1])], mode='constant') def expand_tensor(x, max_len): return [expand_array(i, max_len) if i.shape[1] < max_len else i for i in x] import librosa window_width = 5 sr = 16000 n_fft = int(sr * 0.032) # window_width ms hop_length = int(sr * 0.010) # step ms all_data = { 'train': { 'x': [], 'y': [], }, 'valid': { 'x': [], 'y': [], } } files_data = { 'train': { }, 'valid': { }, 'test': { } } for ds in ['train', 'valid', 'test']: all_samples = [] for i in tqdm(os.listdir(os.path.join(labels_path, ds)), desc='Extract features on {} set'.format(ds)): fp = os.path.join(reduced_sample_rate_path, i).replace('.txt', '.wav').replace('_left', '').replace('_right', '') # fp = os.path.join(separated_audio_path, i).replace('.txt', '_vocals.wav').replace('_left', '').replace('_right', '') if not os.path.exists(fp): continue # Extract features wave, sr = librosa.load(fp, sr) s = librosa.feature.melspectrogram(y=wave, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=64) log_mels = librosa.power_to_db(s, ref=np.max) if ds == 'test': meta = np.full(int(log_mels.shape[1] / 20), -1) x, y = apply_window(log_mels, meta, window_width) else: meta = pd.read_csv(os.path.join(labels_path, ds, i)).values.squeeze() samples_ratio = int(log_mels.shape[1] / len(meta)) diff = meta.shape[0] * samples_ratio - log_mels.shape[1] x, y = apply_window(log_mels, meta, window_width) # x = x[y != -1] # y = y[y != -1] # 
all_data[ds]['x'].extend(x) # all_data[ds]['y'].extend(y) if len(y) > 0: file_dict = { 'x': x, 'y': y } files_data[ds][os.path.join(separated_audio_path, i)] = file_dict # max_len = max([i.shape[1] for i in all_data[ds]['x'] for ds in ['train', 'valid']]) from torchvision import transforms, models from torch.utils.data import Dataset, WeightedRandomSampler import torch.nn.functional as F class CustomTensorDataset(Dataset): """TensorDataset with support of transforms. """ def __init__(self, tensors, transform=None): assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors) self.tensors = tensors self.transform = transform def __getitem__(self, index): x = self.tensors[0][index] if self.transform: x = self.transform(x) y = self.tensors[1][index] return x, y def __len__(self): return self.tensors[0].size(0) define_seed(12) x_train = torch.Tensor(expand_tensor(all_data['train']['x'], max_len)) x_valid = torch.Tensor(expand_tensor(all_data['valid']['x'], max_len)) y_train = torch.LongTensor(all_data['train']['y']) y_valid = torch.LongTensor(all_data['valid']['y']) class_sample_count = np.unique(y_train, return_counts=True)[1] class_weights = torch.Tensor(max(class_sample_count) / class_sample_count) train_dataset = CustomTensorDataset(tensors=(x_train, y_train), transform=None) valid_dataset = CustomTensorDataset(tensors=(x_valid, y_valid), transform=None) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=6, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=6) class Transfer_Cnn14(nn.Module): def __init__(self, pretrained_path, classes_num, pretrain, freeze_base): """Classifier for a new task using pretrained Cnn14 as a sub module. 
""" super(Transfer_Cnn14, self).__init__() self.load_base(pretrain, pretrained_path) self.base.spectrogram_extractor = nn.Identity() self.base.logmel_extractor = nn.Identity() self.base.spec_augmenter = nn.Identity() # Transfer to another task layer self.classifier = nn.Linear(2048, classes_num, bias=True) if freeze_base: for param in self.base.parameters(): param.requires_grad = False self.init_weights() def load_base(self, pretrain, pretrained_path): self.base = Cnn14(sample_rate=16000, window_size=1024, hop_size=320, mel_bins=64, fmin=50, fmax=14000, classes_num=527) if pretrain: logging.info('Load pretrained model from {}'.format(pretrained_path)) checkpoint = torch.load(pretrained_path) self.base.load_state_dict(checkpoint['model']) def init_weights(self): init_layer(self.classifier) def forward(self, x, mixup_lambda=None): """Input: (batch_size, 1, time_steps, mel_bins) """ x = x.unsqueeze(1).transpose(2, 3) output_dict = self.base(x, mixup_lambda) embedding = output_dict['embedding'] clipwise_output = self.classifier(embedding) output_dict['clipwise_output'] = clipwise_output return output_dict['clipwise_output'] # %%capture output from sklearn.metrics import recall_score, f1_score, accuracy_score def custom_score(targets, predicts, average='macro'): return 0.67 * f1_score(targets, predicts, average='macro') + 0.33 * accuracy_score(targets, predicts) define_seed(12) pretrained_path = '../models/pretrained/Cnn14_mAP=0.431.pth' model = Transfer_Cnn14(pretrained_path=pretrained_path, classes_num=len(class_names), pretrain=True, freeze_base=False) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.to(device) loss = torch.nn.CrossEntropyLoss(weight=class_weights.cuda()) optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True) model, max_epoch, max_performance = train_model(model, loss, optimizer, None, num_epochs=50, device=device, train_dataloader=train_dataloader, valid_dataloader=valid_dataloader, class_names=class_names, log_root=log_root, tb_log_root=tb_log_root, features_name=features_type, experiment_name='fg2020-LossWeighted-PANN-CNN14-50', metrics=[custom_score, f1_score, accuracy_score], log_iter=[]) print('Epoch: {0}\n'.format(max_epoch)) print(max_performance) print(output) from sklearn.metrics import recall_score, f1_score, accuracy_score def custom_score(targets, predicts, average='macro'): return 0.67 * f1_score(targets, predicts, average='macro') + 0.33 * accuracy_score(targets, predicts) def predict_proba(x, y, model, model_name, model_epoch, log_root, batch_size): # print('Initialize data') all_predictions = [] all_labels = [] dataset = CustomTensorDataset(tensors=(x, y), transform=None) dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=6) # print('Initialize model') dictionary_path = get_model_by_epoch(os.path.join(log_root, '{0}'.format(model_name)), model_epoch) # print(dictionary_path) checkpoint = torch.load(dictionary_path) model.load_state_dict(checkpoint['model_state_dict']) model.eval() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.to(device) # print('Testing') for inputs, labels in dataloader: inputs = inputs.to(device) labels = labels.to(device) predicts = None with torch.set_grad_enabled(False): preds = model(inputs) predicts = torch.nn.functional.softmax(preds, dim=1).data.cpu().numpy() all_labels.append(labels.data.cpu().numpy()) all_predictions.append(predicts) return 
np.concatenate(all_predictions), np.concatenate(all_labels) pretrained_path = '../models/pretrained/Cnn14_mAP=0.431.pth' model = Transfer_Cnn14(pretrained_path=pretrained_path, classes_num=len(class_names), pretrain=True, freeze_base=False) model_name = 'mel_64x32_fg2020-LossWeighted-PANN-CNN14-50' x_train = torch.Tensor(expand_tensor(all_data['train']['x'], max_len)) x_valid = torch.Tensor(expand_tensor(all_data['valid']['x'], max_len)) y_train = torch.LongTensor(all_data['train']['y']) y_valid = torch.LongTensor(all_data['valid']['y']) probas, labels = predict_proba(x_train, y_train, model, model_name, 43, log_root, batch_size) preds = probas.argmax(axis=1) print('Metrics: F1: {0}, Acc: {1}, Custom: {2}'.format(f1_score(labels, preds, average='macro'), accuracy_score(labels, preds), custom_score(labels, preds, 'macro'))) probas, labels = predict_proba(x_valid, y_valid, model, model_name, 43, log_root, batch_size) preds = probas.argmax(axis=1) print('Metrics: F1: {0}, Acc: {1}, Custom: {2}'.format(f1_score(labels, preds, average='macro'), accuracy_score(labels, preds), custom_score(labels, preds, 'macro'))) pretrained_path = '../models/pretrained/Cnn14_mAP=0.431.pth' model = Transfer_Cnn14(pretrained_path=pretrained_path, classes_num=len(class_names), pretrain=True, freeze_base=False) model_name = 'mel_64x32_fg2020-LossWeighted-PANN-CNN14-50' for ds in ['train', 'valid', 'test']: for f in tqdm(files_data[ds]): x_train = torch.Tensor(files_data[ds][f]['x']) y_train = torch.LongTensor(files_data[ds][f]['y']) probas, labels = predict_proba(x_train, y_train, model, model_name, 43, log_root, batch_size) res = np.concatenate((probas, np.expand_dims(labels, axis=1)), axis=1) os.makedirs(os.path.join(model_name, ds), exist_ok=True) fn = os.path.splitext(os.path.basename(f))[0] np.savetxt(os.path.join(model_name, ds, "{0}.csv".format(fn)), res, delimiter=",") ```
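Since `apply_window` above does the trickiest bookkeeping in this pipeline (mirror-padding the context at the clip edges), a quick shape check on a toy input is a cheap sanity test. This is only a sketch: it assumes the `apply_window` function defined earlier in this notebook is in scope, and the sizes (64 mel bins, 4 frames per label, window width 5) are illustrative rather than the real data.

```
import numpy as np

# Toy "spectrogram": 64 mel bins x 40 frames, with 10 labels -> 4 frames per label.
toy_feats = np.random.rand(64, 40)
toy_labels = np.arange(10)

# Every label should come back with a fixed-size context block of
# window_width * ratio = 5 * 4 = 20 frames, including at the clip edges.
x, y = apply_window(toy_feats, toy_labels, window_width=5)
print(x.shape, y.shape)  # expected: (10, 64, 20) and (10,)
```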
``` import pandas as pd df=pd.read_csv('train.csv') df df=df.dropna() df['content']=df['title']+df['text'] df['id'] = 0 df['id'] = df.index + 880 df df.to_csv('d2v.csv') ddf=pd.read_csv('d2v.csv') df ddf #SVM from getEmbeddings import getEmbeddings import numpy as np from sklearn.svm import SVC import matplotlib.pyplot as plt import scikitplot.plotters as skplt def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() xtr,xte,ytr,yte = getEmbeddings('d2v.csv') np.save('./xtr', xtr) np.save('./xte', xte) np.save('./ytr', ytr) np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') clf = SVC() clf.fit(xtr, ytr) y_pred = clf.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Naive Bayes from getEmbeddings import getEmbeddings from sklearn.naive_bayes import GaussianNB import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') gnb = GaussianNB() gnb.fit(xtr,ytr) y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #KNN from getEmbeddings import getEmbeddings from sklearn.neighbors import KNeighborsClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(xtr, ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Logistic Regression from getEmbeddings import getEmbeddings from sklearn.linear_model import LogisticRegression import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = LogisticRegression(random_state = 0) classifier.fit(xtr, ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Decision Tree 
from getEmbeddings import getEmbeddings from sklearn.tree import DecisionTreeClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = DecisionTreeClassifier() classifier.fit(xtr,ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Random Forest from getEmbeddings import getEmbeddings from sklearn.ensemble import RandomForestClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = RandomForestClassifier() classifier.fit(xtr,ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) ```
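The six classifier cells above repeat the same load / fit / score boilerplate. A more compact sketch (assuming the same `xtr.npy`, `xte.npy`, `ytr.npy`, `yte.npy` splits saved earlier in the notebook) evaluates all of the models in one loop:

```
import numpy as np
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

# Reuse the doc2vec embeddings saved by the cells above.
xtr, xte = np.load('./xtr.npy'), np.load('./xte.npy')
ytr, yte = np.load('./ytr.npy'), np.load('./yte.npy')

models = {
    'SVM': SVC(),
    'Naive Bayes': GaussianNB(),
    'KNN': KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2),
    'Logistic Regression': LogisticRegression(random_state=0),
    'Decision Tree': DecisionTreeClassifier(),
    'Random Forest': RandomForestClassifier(),
}

for name, clf in models.items():
    clf.fit(xtr, ytr)
    accuracy = (clf.predict(xte) == yte).mean() * 100
    print(f"{name}: accuracy = {accuracy:.2f}%")
```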
github_jupyter
import pandas as pd df=pd.read_csv('train.csv') df df=df.dropna() df['content']=df['title']+df['text'] df['id'] = 0 df['id'] = df.index + 880 df df.to_csv('d2v.csv') ddf=pd.read_csv('d2v.csv') df ddf #SVM from getEmbeddings import getEmbeddings import numpy as np from sklearn.svm import SVC import matplotlib.pyplot as plt import scikitplot.plotters as skplt def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() xtr,xte,ytr,yte = getEmbeddings('d2v.csv') np.save('./xtr', xtr) np.save('./xte', xte) np.save('./ytr', ytr) np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') clf = SVC() clf.fit(xtr, ytr) y_pred = clf.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Naive Bayes from getEmbeddings import getEmbeddings from sklearn.naive_bayes import GaussianNB import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') gnb = GaussianNB() gnb.fit(xtr,ytr) y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #KNN from getEmbeddings import getEmbeddings from sklearn.neighbors import KNeighborsClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(xtr, ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Logistic Regression from getEmbeddings import getEmbeddings from sklearn.linear_model import LogisticRegression import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = LogisticRegression(random_state = 0) classifier.fit(xtr, ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Decision Tree 
from getEmbeddings import getEmbeddings from sklearn.tree import DecisionTreeClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = DecisionTreeClassifier() classifier.fit(xtr,ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred) #Random Forest from getEmbeddings import getEmbeddings from sklearn.ensemble import RandomForestClassifier import numpy as np import matplotlib.pyplot as plt import scikitplot.plotters as skplt from sklearn.metrics import classification_report, confusion_matrix def plot_cmat(yte, ypred): '''Plotting confusion matrix''' skplt.plot_confusion_matrix(yte,ypred) plt.show() # xtr,xte,ytr,yte = getEmbeddings("d2v.csv") # np.save('./xtr', xtr) # np.save('./xte', xte) # np.save('./ytr', ytr) # np.save('./yte', yte) xtr = np.load('./xtr.npy') xte = np.load('./xte.npy') ytr = np.load('./ytr.npy') yte = np.load('./yte.npy') classifier = RandomForestClassifier() classifier.fit(xtr,ytr) y_pred = classifier.predict(xte) cm = confusion_matrix(yte, y_pred) #sns.heatmap(cm, annot=True, fmt="d") #y_pred = gnb.predict(xte) m = yte.shape[0] n = (yte != y_pred).sum() print("Accuracy = " + format((m-n)/m*100, '.2f') + "%") plot_cmat(yte, y_pred)
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) 
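# Note: confirmed_days_df spans every calendar day from the first OWID sample up to the
# extraction datetime; get_cases_dataframe() below right-merges the per-country case counts
# onto this range and forward-fills, so dates without reported data still appear in the report.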
confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) 
return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() 
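# Note: for each ordered backend pair, common_teks_fraction is the share of backend (A)'s TEKs
# that also appear in backend (B); the pivot below turns this into the cross-sharing matrix
# reported as "Fraction of TEKs in Backend (A) Available in Backend (B)".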
multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ ["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = 
daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ 
.groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": "shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", 
inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0) result_summary_df.head(daily_plot_days) def compute_aggregated_results_summary(days) -> pd.DataFrame: aggregated_result_summary_df = result_summary_df.copy() aggregated_result_summary_df["covid_cases_for_ratio"] = \ aggregated_result_summary_df.covid_cases.mask( aggregated_result_summary_df.shared_diagnoses == 0, 0) aggregated_result_summary_df["covid_cases_for_ratio_es"] = \ aggregated_result_summary_df.covid_cases_es.mask( aggregated_result_summary_df.shared_diagnoses_es == 0, 0) aggregated_result_summary_df = aggregated_result_summary_df \ .sort_index(ascending=True).fillna(0).rolling(days).agg({ "covid_cases": "sum", "covid_cases_es": "sum", "covid_cases_for_ratio": "sum", "covid_cases_for_ratio_es": "sum", "shared_teks_by_generation_date": "sum", "shared_teks_by_upload_date": "sum", "shared_diagnoses": "sum", "shared_diagnoses_es": "sum", 
}).sort_index(ascending=False) with pd.option_context("mode.use_inf_as_na", True): aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int) aggregated_result_summary_df["teks_per_shared_diagnosis"] = \ (aggregated_result_summary_df.shared_teks_by_upload_date / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary ``` ## Report Results ``` display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] ``` ### Daily Summary Table ``` result_summary_df_ = result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = 
result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() ``` ### Hourly Summary Plots ``` hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) ``` ### Publish Results ``` github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ 
.rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) ``` ### Save Results ``` report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( 
multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") ``` ### Publish Results as JSON ``` def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) ``` ### Publish on README ``` with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) ``` ### Publish on Twitter ``` enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) 
display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids) ```
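For reference, a quick illustration of how the tweet's usage-ratio fields are rendered. The `format_ratio` name below is a hypothetical stand-in that mirrors `format_shared_diagnoses_per_covid_case` (which is only defined inside the Twitter-publishing branch), and the input values are made up, not report output:

```python
# Minimal re-statement of the tweet's usage-ratio formatting (illustrative values only).
def format_ratio(value: float) -> str:
    return "–" if value == 0 else f"≤{value:.2%}"

print(format_ratio(0))       # –
print(format_ratio(0.0314))  # ≤3.14%
```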
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] result_summary_df_ = result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: 
percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = 
\ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results 
= \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) 
status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids)
<img src="Logo.png" width="100" align="left"/> # <center> Preparatory Unit project:</center> Congratulations on finishing the lessons content for this preparatory unit!! At this stage it's important to test your theoritical concepts from a practical side and that's exactely the goal of this project. ## Some guidelines: 1. To run a cell you can use the shortcut use : Shift + Enter 2. Only sections mentioned as To-Do are the places where you should put in your own code other than that we do not recommend that you change the provided code. 3. You will be graded for the visibility of your code so make sure you respect the correct indentation and your code contains suitable variables names. 4. This notebook is designed in a sequential way so if you solve your project on different days make sure to run the previous cells before you can run the one you want. 5. Teacher assistants in th slack space remain available to answer any questions you might have. >Best of luck ! ## Project Sections: In this project you will have a chance to practice most of the important aspects we saw throughout The Preparatory Unit. This project is divided into 5 sections: 1. [Setting the environement](#set_env) 2. [Importing necessary tools](#importing) 3. [SQLite section](#sql) 4. [Data types section](#datatypes) 5. [Linear Algebra section](#algebra) ### 1. Setting the environement: <a id='set_env'></a> ``` # Make sure you have virtualenv installed !pip install --user virtualenv # To-Do: create a virtual environement called myenv !python -m venv myenv # Activate the environement ! myenv\Scripts\activate.bat # Add this virtual environement to Jupyter notebook !pip install --user ipykernel !python -m ipykernel install --user --name=myenv # Install the necessary dependencies !pip install scipy !pip install numpy ``` > Please check if you have sqlite installed on your device. For more informations head to the sql lesson ### 2. Importing necessary tools:<a id='importing'></a> ``` from data import database_manager as dm import utils from matplotlib import pyplot from linear_algebra import curve_fitting as cf ``` ### 3. SQLite section : <a id='sql'></a> ``` # create a connection to the database connection = dm.create_connection("longley.db") # To-Do : retrieve rows of the table rows = dm.select_all(connection) dm.print_rows(rows) ``` > Since at this stage we already retrieved our data it's more memory efficient to close the connection to our database. ``` #To-Do close connection using the close_connection function from the data_manager file (dm) dm.close_connection(connection) ``` ### 4. Data types section : <a id='datatypes'></a> Let's check the datatypes of the retrieved rows ``` rows ``` > This is a list containing multiple tuples, each tuple is a row in the Table with each element within this tuple being a string. We will be executing mathematical operations on these values and hence we need them in numerical format. Each value contains decimal fractions which means the suitable type to convert to is either double or float. In this case we need to convert these values to a float fomat. Head up to the "utils.py" file and set the function convert_to_floats to be able to do so. ``` # To-Do convert to an ndarray of floats by calling the function convert_to_floats from the utils file # make sure to set some requirements in that function before you call it here data = utils.convert_to_floats(rows) # let's check the shape data.shape # Let's see the format data ``` ### 5. 
Linear Algebra section: <a id='algebra'></a> ``` # Let's check if the two variables GNP.deflator and year are correlated x, y = data[:,5],data[:, 0] pyplot.scatter(x, y) pyplot.xlabel("Year") pyplot.ylabel("GNP.deflactor") pyplot.show() ``` > You can clearly see that the two variables: GNP.deflator (y axis) and year (x axis). In other words the GNP.deflactor is increasing throughout the years. Under this trend it makes sense that we can fit a line to these data points, a line that can describe this trend. And this is our task for this section. #### Explanation: Curve fitting aims to find the perfect curve equation for a number of correlated variables. In our example we aim to find the equation for the line that can perfectly fit this point . Such a line should be at minimum distance from all points in average. Because we are dealing with two variables only, the line's equation should be of the form : y = a*x + b . Which is a typical linear equation. To acheieve this you will have to : 1. Head to the file linear_algebra/curve_fiiting.py file. 2. Set the objective function's code (function set_objective), objective function is the function that returns the typical shape of our wanted linear equation ( a*x+b), Please delete the "pass" statement and write your code. 3. Here in this notebook in the cell below, call the function get_results and pass to it x and y and get back the optimal values of "a" and "b". ``` # To-Do get the values of a and b using the get_result function a,b = cf.get_result(x,y) # plotting the result from numpy import arange pyplot.scatter(x, y) # define a sequence of inputs between the smallest and largest known inputs x_line = arange(min(x), max(x), 1) # calculate the output for the range y_line = cf.set_objective(x_line, a, b) # create a line plot for the mapping function pyplot.plot(x_line, y_line, '--', color='red') pyplot.show() ``` > yohooo ! It's indeed working!!! # Final thoughts : This curve fitting process can have many use cases within the machine learning workflow. A curve fitting can be used as a way to fill in missing values. Datasets aren't always clean. In fact in 90% of the cases we need to do some pre-processing and cleaning for the data before using it in any analysis. In many cases, this cleaning can include filling the missing values, in other words you have some data points with some missing values for some features, if we know that we have a "model" a curve that is supposed to model the trend(or correlation between two of our existing features we can use it to infer these missing values. So as a result Curve fitting can be used in the data cleaning step of the workflow. Another use case, is when the curve fitting is our end goal, Thus we are cleaning and modeling because the end objective is to have such an equation, in this case the curve fitting is the heart of the Machine learning project.
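For reference, below is a minimal sketch of what the To-Do helper functions in this project could look like. This is not the actual content of the project's `utils.py` or `linear_algebra/curve_fitting.py` (those files are not shown here); it only assumes `numpy` and `scipy` are installed, as done in the setup section, and that `rows` is a list of tuples of numeric strings as described above.

```
import numpy as np
from scipy.optimize import curve_fit

def convert_to_floats(rows):
    """Convert a list of tuples of numeric strings into an ndarray of floats."""
    return np.array(rows, dtype=float)

def set_objective(x, a, b):
    """Objective function: the linear equation y = a*x + b."""
    return a * x + b

def get_result(x, y):
    """Fit the objective function to the data and return the optimal a and b."""
    popt, _ = curve_fit(set_objective, x, y)  # least-squares fit of a and b
    a, b = popt
    return a, b
```

With definitions along these lines, `cf.get_result(x, y)` returns the slope and intercept that minimize the squared vertical distance to the points, which is the kind of line plotted in the cell above.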
# Workshop 1 (Taller 1): Python Basics + Functions + Lists + Dictionaries

This workshop is for solving basic Python problems: handling lists, dictionaries, etc. The workshop must be completed in a Jupyter Notebook in each student's own folder. There must be commits showing the progress of the workshop. Below each question there is a cell for the code.

## Python Basics

### 1. Which version of Python is running?

```
import sys
print('{0[0]}.{0[1]}'.format(sys.version_info))
```

### 2. Compute the area of a circle of radius 5

```
pi = 3.1416
radio = 5
area= pi * radio**2
print(area)
```

### 3. Write code that prints all the colors that are in color_list_1 and are not present in color_list_2

Expected output: {'Black', 'White'}

```
color_list_1 = set(["White", "Black", "Red"])
color_list_2 = set(["Red", "Green"])
color_list_1 - color_list_2
```

### 4. Print one line for each folder that makes up the path where Python is running

e.g. C:/User/sergio/code/programación

Expected output:
+ User
+ sergio
+ code
+ programacion

```
path = 'C:/Users/Margarita/Documents/Mis_documentos/Biologia_EAFIT/Semestre_IX/Programacion/'
size = len (path)
guardar = ""
for i in range(3,size):
    if path[i] != '/':
        guardar = guardar + path[i]
    else:
        print(guardar)
        guardar = ""
```

## Working with Lists

### 5. Print the sum of the numbers in my_list

```
my_list = [5,7,8,9,17]
sum_list = sum (my_list)
print(sum_list)
```

### 6. Insert elemento_a_insertar before each element of my_list

```
elemento_a_insertar = 'E'
my_list = [1, 2, 3, 4]
```

The expected output is a list like this: [E, 1, E, 2, E, 3, E, 4]

```
elemento_a_insertar = 'E'
my_list = [1, 2, 3, 4]
size = len (my_list)
carpeta = []
for i in range(size):
    carpeta = carpeta + [elemento_a_insertar,my_list[i]]
my_list = carpeta
print (my_list)
```

### 7. Split my_list into a list of lists every N elements

```
N = 3
my_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n']
```

Expected output: [['a', 'd', 'g', 'j', 'm'], ['b', 'e', 'h', 'k', 'n'], ['c', 'f', 'i', 'l']]

```
N=3
lista=[]
listaa = []
my_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n']
size = len(my_list)
for i in range(N):
    lista = lista + [listaa]
for i in range (size):
    lista[i%N] = lista[i%N] + [my_list[i]]
print(lista)
```

### 8. Find the list inside list_of_lists whose elements have the largest sum

```
list_of_lists = [ [1,2,3], [4,5,6], [10,11,12], [7,8,9] ]
```

Expected output: [10, 11, 12]

```
list_of_lists = [ [1,2,3], [4,5,6], [10,11,12], [7,8,9] ]
size = len(list_of_lists)
carpeta = list_of_lists[1]
for i in range(size):
    if sum(list_of_lists[i]) > sum(carpeta):
        carpeta = list_of_lists[i]
print(carpeta)
```

## Working with Dictionaries

### 9. Create a dictionary where each number from 1 to N is a key whose value is that number squared

```
N = 5
```

Expected output: {1:1, 2:4, 3:9, 4:16, 5:25}

```
N = 5
diccio = {}
for i in range(1,N+1):
    diccio [i]= i**2
print(diccio)
```

### 10. Concatenate the dictionaries in dictionary_list to create a new one

```
dictionary_list=[{1:10, 2:20} , {3:30, 4:40}, {5:50,6:60}]
```

Expected output: {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}

```
dictionary_list=[{1:10, 2:20} , {3:30, 4:40}, {5:50,6:60}]
final= {}
for i in dictionary_list:
    for k in i:
        final[k] = i[k]
print(final)
```

### 11. Add a new key "cuadrado" whose value is the "numero" of each dictionary squared

```
dictionary_list=[{'numero': 10, 'cantidad': 5} , {'numero': 12, 'cantidad': 3}, {'numero': 5, 'cantidad': 45}]
```

Expected output: [{'numero': 10, 'cantidad': 5, 'cuadrado': 100} , {'numero': 12, 'cantidad': 3, 'cuadrado': 144}, {'numero': 5, 'cantidad': 45, 'cuadrado': 25}]

```
dictionary_list=[{'numero': 10, 'cantidad': 5} , {'numero': 12, 'cantidad': 3}, {'numero': 5, 'cantidad': 45}]
for i in range(0,len(dictionary_list)):
    dictionary_list[i]['cuadrado']= dictionary_list[i]['numero']**2
print(dictionary_list)
```

## Working with Functions

### 12. Define and call a function that receives 2 parameters and solves problem __3__

```
def diferencia_conjuntos(color_list_1, color_list_2):
    print (color_list_1 - color_list_2)

# Implement the function
diferencia_conjuntos( color_list_1 = set(["White", "Black", "Red"]) , color_list_2 = set(["Red", "Green"]))
```

### 13. Define and call a function that receives a list of lists as a parameter and solves problem 8

```
def max_list_of_lists(list_of_lists):
    size = len(list_of_lists)
    carpeta = list_of_lists[1]
    for i in range(size):
        if sum(list_of_lists[i]) > sum(carpeta):
            carpeta = list_of_lists[i]
    print(carpeta)

# Implement the function
list_of_lists = [ [1,2,3], [4,5,6], [10,11,12], [7,8,9] ]
max_list_of_lists (list_of_lists)
```

### 14. Define and call a function that receives a parameter N and solves problem 9

```
def diccionario_cuadradovalor(N):
    diccio = {}
    final = {}
    for i in range(1,N+1):
        final = diccio [i]= i**2
    print(diccio)

#Implement the function:
N = 5
diccionario_cuadradovalor(N)
```
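As a side note (not part of the original workshop), problems 7, 9, and 10 can also be solved more compactly with slicing, comprehensions, and dictionary iteration; a minimal sketch:

```
N = 3
my_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n']

# Problem 7: group every N-th element together using slicing
grouped = [my_list[i::N] for i in range(N)]
print(grouped)  # [['a', 'd', 'g', 'j', 'm'], ['b', 'e', 'h', 'k', 'n'], ['c', 'f', 'i', 'l']]

# Problem 9: dictionary of squares with a dict comprehension
squares = {i: i**2 for i in range(1, 5 + 1)}
print(squares)  # {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}

# Problem 10: merge a list of dictionaries
dictionary_list = [{1: 10, 2: 20}, {3: 30, 4: 40}, {5: 50, 6: 60}]
merged = {k: v for d in dictionary_list for k, v in d.items()}
print(merged)  # {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
```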
## Simple example of the 1-D Chirp-Z transform

The chirp-z transform lets us sample the DTFT starting at an arbitrary point on a finely-spaced grid, with only slightly worse runtime than an FFT evaluated at the same number of points, and a potentially much faster runtime than an FFT padded enough to achieve the same spectral resolution.

```
%pylab inline

import numpy as np
import chirpz
```

First we create a signal by smoothing some random noise and look at its DFT evaluated at 256 points, as computed by the FFT. Computing the N-point DFT of an N-point signal via the FFT has a complexity of $$O(N \log N)$$.

```
# a basic signal
N = 256
np.random.seed(0)
x = np.convolve(np.random.normal(0, 1, N), np.ones(20)/20.0)[:N]

omegas = np.linspace(-np.pi, np.pi, N+1)[:N]

dft = np.fft.fftshift(np.fft.fft(x))
fig = pylab.figure(figsize=(12, 3))
ax = fig.add_subplot(1, 1, 1)
pylab.plot(omegas, abs(dft))
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(0, 40)
```

Then we explicitly evaluate the discrete-time Fourier transform (DTFT) of the signal $x[n]$. Remember that the DTFT of a discrete-time signal is a continuous function of omega. Naively evaluating the M-point DTFT of an N-point signal is $O(M\cdot N)$. Here we evaluate at M = 16x256 points, and zoom in on $[-\frac{\pi}{4}, \frac{\pi}{4} ]$.

```
fig = pylab.figure(figsize=(12, 3))
ax = fig.add_subplot(1, 1, 1)

zoom_factor = 16
omegas_zoom = np.linspace(-np.pi, np.pi, zoom_factor*N+1)[:zoom_factor*N]
dtft = chirpz.pychirpz.dtft(x, omegas_zoom)

ax.plot(omegas_zoom, np.abs(dtft), label='dtft')
ax.scatter(omegas, np.abs(dft), c='r', label='fft dft')
ax.set_xlim(-np.pi/4, np.pi/4.0)
ax.set_ylim(0, 40)
pylab.legend()
```

Note from the above plot that there are various sampling artifacts. One way of resolving this is to oversample the DFT via zero-padding. This is what happens when you ask for an M-point FFT of an N-point signal. The results are below. This is an $O(M \log M)$ operation.

```
dtft_zoom = chirpz.pychirpz.dtft(x, omegas_zoom)
fft_zoom = np.fft.fftshift(np.fft.fft(x, N*zoom_factor))

fig = pylab.figure(figsize=(12, 3))
ax = fig.add_subplot(1, 1, 1)

ax.plot(omegas_zoom, np.abs(dtft_zoom), label='dtft')
ax.scatter(omegas, np.abs(dft), c='r', s=40, edgecolor='none', label='fft dft')
ax.scatter(omegas_zoom, np.abs(fft_zoom), c='g', edgecolor='none', label='zero-padded M-point fft dft')

ax.set_xlim(-np.pi/4, np.pi/4.0)
ax.set_ylim(0, 40)
pylab.legend()
```

But what if we only care about a subset of the DTFT? That is, what if we want to evaluate the DTFT on the region $[-\frac{\pi}{4}, \frac{\pi}{4} ]$ and ignore everything else? This is where the chirp-z transform comes in. We can specify that we only wish to evaluate the DTFT starting at a particular angular frequency, with a certain angular spacing, for a specific number of points. If we wish to evaluate the DTFT of an $N$-length signal at $M$ evenly-spaced points, it will take roughly $O((M+N) \log (M+N))$. We can see the result below.

```
# now try the chirp-z transform
start = -np.pi / 4.0
omega_delta = omegas_zoom[1] - omegas_zoom[0]
M = (N * zoom_factor) // 4  # number of output points covering [-pi/4, pi/4)

zoom_cz = chirpz.pychirpz.zoom_fft(x, start, omega_delta , M)

fig = pylab.figure(figsize=(12, 3))
ax = fig.add_subplot(1, 1, 1)

omegas_cz = np.arange(M) * omega_delta + start

ax.plot(omegas_zoom, np.abs(dtft_zoom), label='dtft')
ax.scatter(omegas_cz, np.abs(zoom_cz), c='r', s=20, edgecolor='none', label='chirp-z')

ax.set_xlim(-np.pi/4, np.pi/4.0)
ax.set_ylim(0, 40)
pylab.legend()
```

This can be a substantial savings if you are trying to evaluate a very zoomed-in region of the DTFT -- that is, if you would otherwise have had to pad your FFT by a large factor to achieve a given frequency spacing.
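To make the $O((M+N) \log (M+N))$ claim concrete, here is a minimal NumPy-only sketch of a Bluestein-style zoom DTFT. It is not the `chirpz.pychirpz` implementation used above (that package's internals are not shown here); it just illustrates how the chirp-z transform turns the zoomed evaluation into a single linear convolution computed with FFTs of length about M + N.

```
import numpy as np

def zoom_dtft_bluestein(x, omega_start, omega_step, M):
    """Evaluate the DTFT of x at omegas omega_start + k*omega_step, k = 0..M-1,
    via Bluestein's chirp-z trick: chirp-premultiply, linearly convolve with a
    chirp kernel (using FFTs), then chirp-postmultiply."""
    x = np.asarray(x, dtype=complex)
    N = len(x)
    k = np.arange(max(M, N))
    # chirp[m] = exp(-1j * omega_step * m^2 / 2)
    chirp = np.exp(-1j * omega_step * k**2 / 2.0)
    L = int(2 ** np.ceil(np.log2(M + N - 1)))  # FFT length >= M + N - 1
    # premultiplied input: x[n] * exp(-1j*omega_start*n) * chirp[n]
    y = np.zeros(L, dtype=complex)
    y[:N] = x * np.exp(-1j * omega_start * np.arange(N)) * chirp[:N]
    # convolution kernel conj(chirp), laid out for circular convolution of length L
    v = np.zeros(L, dtype=complex)
    v[:M] = np.conj(chirp[:M])
    v[L - N + 1:] = np.conj(chirp[1:N][::-1])
    # linear convolution via FFT, then postmultiply by the chirp
    g = np.fft.ifft(np.fft.fft(y) * np.fft.fft(v))
    return g[:M] * chirp[:M]
```

On the example above, `zoom_dtft_bluestein(x, -np.pi/4, omega_delta, M)` should closely match the directly evaluated DTFT at the same frequencies, while only using FFTs of length on the order of M + N instead of a naive O(M·N) sum.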
<a id="title_ID"></a> # JWST Pipeline Validation Notebook: # calwebb_detector1, jump unit tests <span style="color:red"> **Instruments Affected**</span>: NIRCam, NIRISS, NIRSpec, MIRI, FGS ### Table of Contents <div style="text-align: left"> <br> [Introduction](#intro) <br> [JWST Unit Tests](#unit) <br> [Defining Terms](#terms) <br> [Test Description](#description) <br> [Data Description](#data_descr) <br> [Imports](#imports) <br> [Convenience Functions](#functions) <br> [Perform Tests](#testing) <br> [About This Notebook](#about) <br> </div> <a id="intro"></a> # Introduction This is the validation notebook that displays the unit tests for the Jump Detection step in calwebb_detector1. This notebook runs and displays the unit tests that are performed as a part of the normal software continuous integration process. For more information on the pipeline visit the links below. * Pipeline description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/jump/index.html * Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/ [Top of Page](#title_ID) <a id="unit"></a> # JWST Unit Tests JWST unit tests are located in the "tests" folder for each pipeline step within the [GitHub repository](https://github.com/spacetelescope/jwst/tree/master/jwst/), e.g., ```jwst/jump/tests```. * Unit test README: https://github.com/spacetelescope/jwst#unit-tests [Top of Page](#title_ID) <a id="terms"></a> # Defining Terms These are terms or acronymns used in this notebook that may not be known a general audience. * JWST: James Webb Space Telescope * NIRCam: Near-Infrared Camera [Top of Page](#title_ID) <a id="description"></a> # Test Description Unit testing is a software testing method by which individual units of source code are tested to determine whether they are working sufficiently well. Unit tests do not require a separate data file; the test creates the necessary test data and parameters as a part of the test code. [Top of Page](#title_ID) <a id="data_descr"></a> # Data Description Data used for unit tests is created on the fly within the test itself, and is typically an array in the expected format of JWST data with added metadata needed to run through the pipeline. [Top of Page](#title_ID) <a id="imports"></a> # Imports * tempfile for creating temporary output products * pytest for unit test functions * jwst for the JWST Pipeline * IPython.display for display pytest reports [Top of Page](#title_ID) ``` import tempfile import pytest import jwst from IPython.display import IFrame ``` <a id="functions"></a> # Convenience Functions Here we define any convenience functions to help with running the unit tests. [Top of Page](#title_ID) ``` def display_report(fname): '''Convenience function to display pytest report.''' return IFrame(src=fname, width=700, height=600) ``` <a id="testing"></a> # Perform Tests Below we run the unit tests for the Jump Detection step. [Top of Page](#title_ID) ``` with tempfile.TemporaryDirectory() as tmpdir: !pytest jwst/jump -v --ignore=jwst/associations --ignore=jwst/datamodels --ignore=jwst/stpipe --ignore=jwst/regtest --html=tmpdir/unit_report.html --self-contained-html report = display_report('tmpdir/unit_report.html') report ``` <a id="about"></a> ## About This Notebook **Author:** Alicia Canipe, Staff Scientist, NIRCam <br>**Updated On:** 01/07/2021 [Top of Page](#title_ID) <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
``` import sys,os,time,cv2 import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.nn.functional as F import ops from resnet_tsm import resnet18 as resnet from utils import get_dtstr, emo2txt, imread_to_rgb, crop_img import face_recognition import moviepy.editor as mpe # restore net net = resnet().cuda() ckpt = torch.load(ops.weight_path+'/resnet18_tsm_weights.tar') net.load_state_dict(ckpt['model_state_dict']) net.eval() print 'net loaded' # episode_path # DB_PATH = '/home/jhchoi/datasets2/friends/' epi_sel = 'ep06' DB_PATH = '../../img_ep06/' # output path OUT_PATH = '../data/friends_s01_'+epi_sel+'.jsonl' out_json = open(OUT_PATH, 'w+') import json ep06_track = sorted(os.listdir('../../anno_ep06/')) ep06_dict = dict() for i,f in enumerate(ep06_track): with open('../../anno_ep06/'+f) as jsf: js = json.load(jsf) ep06_dict[i] = js for i,imf in enumerate(sorted(os.listdir(DB_PATH))): sys.stdout.write("\r"+str(i)+'/'+str(len(os.listdir(DB_PATH)))) f_b_size = 4 # buffer size id_dict = ep06_dict[i] # new frame buffer f_buffer = [] for j in range(f_b_size): f_fname = imf #str('%05d'%(i-f_b_size+j+1))+'.jpg' f_img = imread_to_rgb(os.path.join(DB_PATH, f_fname)) f_buffer.append(f_img) # === process buffer for j in range(len(id_dict)): # crop person region coor=(xmin,ymin,xmax,ymax) # obj_coor = np.array([id_dict[j]['topleft']['x'],id_dict[j]['topleft']['y'], # id_dict[j]['bottomright']['x'],id_dict[j]['bottomright']['y'] ]).astype(int) obj_coor = np.array([id_dict[j]['bottomright']['x'],id_dict[j]['bottomright']['y'], id_dict[j]['topleft']['x'],id_dict[j]['topleft']['y'] ]).astype(int) obj_crop = crop_img(f_buffer[-1], obj_coor[0], obj_coor[1], obj_coor[2]-obj_coor[0], obj_coor[3]-obj_coor[1]) obj_id = int(id_dict[j]['id']) # detect faces faces_coor = np.array(face_recognition.face_locations((obj_crop*255).astype(np.uint8))).astype(int) if faces_coor.ndim < 2: # no face, continue continue else: num_faces = faces_coor.shape[0] # refine coordinates as (xmin,ymin,xmax,ymax) faces_coor = faces_coor[0] faces_coor = np.array([faces_coor[3], faces_coor[0], faces_coor[1], faces_coor[2]]) faces_coor[0] += obj_coor[0] faces_coor[1] += obj_coor[1] faces_coor[2] += obj_coor[0] faces_coor[3] += obj_coor[1] # extract faces and get emotions # get face crops from buffer coor = faces_coor #faces_coor[:,j] w = coor[2] - coor[0] h = coor[3] - coor[1] m = 0.5 s = np.sqrt((w+(w+h)*m)*(h+(w+h)*m)) f_crops = [crop_img(im, int(coor[0]+w*0.5), int(coor[1]+h*0.5), int(s), int(s), True) for im in f_buffer] f_batch = [cv2.resize(f_c, (224,224)) for f_c in f_crops] #plt.imsave('../../img_crop_ep06/'+str('%05d'%i)+'_'+str(j)+'.jpg', f_batch[0]) # net forward - get emotion f_batch = torch.Tensor(np.array(f_batch)).cuda() f_batch = f_batch.unsqueeze(0).permute(0,1,4,2,3) with torch.no_grad(): f_emo = net(f_batch)[0,-2:,:].mean(0).argmax().detach().cpu().numpy() # write text f_emo_txt = emo2txt(f_emo) json_txt = str('{"type": "emotion", "class": "%s", "frames": %d, "coordinates": [%d,%d,%d,%d], "id": %d}\n'%\ (f_emo_txt, i, coor[0],coor[1],coor[2],coor[3], obj_id)) out_json.write(json_txt) out_json.close() #float(i-1)*29907/6237.*1/23.98 ```
# Set up ``` import math import random %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd from pandas import DataFrame from scipy.optimize import minimize import seaborn as sns sns.set(style='ticks', context='paper') colors=["#e3c934","#68c4bf","#c51000","#287271"] sns.set_palette(colors) ``` ## Library ``` def barkbark(cloud,formants,formantchar='F'): newcloud=cloud.copy() for formant in formants: name = str(formant).replace(formantchar,'z') newcloud[name] = 26.81/ (1+ 1960/newcloud[formant]) - 0.53 return newcloud def activation(testset,cloud,dims = {'F0':4,'F1':2,'F2':3,'F3':1},c=0.01,rmspkr=True): # Get stuff ready dims.update((x, (y/sum(dims.values()))) for x, y in dims.items()) # Normalize weights to sum to 1 # If the testset happens to have N in it, remove it before joining dfs test=testset.copy() if 'N' in test.columns: test = test.drop(columns='N', axis=1,inplace=True) exemplars=cloud[~cloud.isin(test)].dropna() if rmspkr == True: spkr=test.speaker.iloc[0] spkrexemps=exemplars[ exemplars['speaker'] == spkr ].index exemplars.drop(spkrexemps, inplace=True) # Merge test and exemplars bigdf = pd.merge( test.assign(key=1), # Add column named 'key' with all values == 1 exemplars.assign(key=1), # Add column named 'key' with all values == 1 on='key', # Match on 'key' to get cross join (cartesian product) suffixes=['_t', '_ex'] ).drop('key', axis=1) # Drop 'key' column dimensions=list(dims.keys()) # Get dimensions from dictionary weights=list(dims.values()) # Get weights from dictionary tcols = [f'{d}_t' for d in dimensions] # Get names of all test columns excols = [f'{d}_ex' for d in dimensions] # Get names of all exemplar columns # Multiply each dimension by weights i = bigdf.loc[:, tcols].values.astype(float) # Get all the test columns i *= weights # Multiply test columns by weight j = bigdf.loc[:, excols].values.astype(float) # Get all the exemplar columns j *= weights # Multiply exemplar columns by weights # Get Euclidean distance bigdf['dist'] = np.sqrt(np.sum((i-j)**2, axis=1)) # get activation: exponent of negative distance * sensitivity c, multiplied by N_j bigdf['a'] = np.exp(-bigdf.dist*c)*bigdf.N return bigdf def reset_N(exemplars, N=1): # Add or override N, default to 1 exemplars['N'] = N return exemplars def probs(bigdf,cats): prs = {} for cat in cats: label = cat+'_ex' cat_a = bigdf.groupby(label).a.sum() pr = cat_a/sum(cat_a) pr = pr.rename_axis(cat).reset_index().rename(columns={"a":"probability"}) prs[cat]=pr return prs def choose(pr,test,cats,runnerup=False): newtest = test.copy() for cat in cats: choicename = cat + 'Choice' choiceprobname = cat + 'Prob' best2 = pr[cat]['probability'].nlargest(n=2).reset_index(drop=True) # Get the two highest probs for each cat type choiceprob = best2[0] # Match the prob to the category choice = pr[cat].loc[pr[cat]['probability']==choiceprob,cat].iloc[0] newtest[choicename] = choice newtest[choiceprobname] = choiceprob if runnerup == True: choice2name = cat + 'Choice2' choice2probname = cat +'Choice2Prob' choice2prob = best2[1] choice2 = pr[cat].loc[pr[cat]['probability']==choice2prob,cat].iloc[0] newtest[choice2name] = choice2 newtest[choice2probname] = choice2prob return newtest def gettestset(cloud,balcat,n): #Gets n number of rows per cat in given cattype testlist=[] for cat in list(cloud[balcat].unique()): samp = cloud[cloud[balcat]==cat].sample(n) testlist.append(samp) test=pd.concat(testlist) return test def multicat(cloud,testset,cats,dims = {'F0':4,'F1':2,'F2':3,'F3':1},c=0.01): 
choicelist=[] for ix in list(testset.index.values): test = testset.loc[[ix,]] #exemplars=cloud[~cloud.isin(test)].dropna() exemplars=cloud.copy() reset_N(exemplars) bigdf=activation(test,exemplars,dims = dims,c=c) pr=probs(bigdf,cats) choices = choose(pr,test,cats) choicelist.append(choices) choices=pd.concat(choicelist, ignore_index=True) return choices def checkaccuracy(choices,cats): acc = choices.copy() # Make a copy of choices to muck around with for cat in cats: # Iterate over your list of cats accname = cat + 'Acc' # Get the right column names choicename = cat + 'Choice' # If choice is the same as intended, acc =y, else n acc[accname] = np.where(acc[cat]==acc[choicename], 'y', 'n') return acc def propcorr(acc,cat): perc = dict(acc.groupby(cat)[cat+'Acc'].value_counts(normalize=True).drop(labels='n',level=1).reset_index(level=1,drop=True)) pc=pd.DataFrame.from_dict(perc, orient='index').reset_index() pc.columns=[cat,'propcorr'] return pc def overallacc(acc,cat): totalcorrect = acc[cat+'Acc'].value_counts(normalize=True)['y'] return totalcorrect def accplot(pc,cat,acc): obs=str(len(acc)) pl = sns.barplot(x=cat,y='propcorr',data=pc,palette=colors) plt.ylim(0,1.01) pl.set(ylabel='Proportion accurate of '+obs+' trials') pl.set_xticklabels( pl.get_xticklabels(), rotation=45, horizontalalignment='right', fontweight='light', fontsize='x-large') plt.show() def continuum (start,end,cloud,dimlist = ['F0','F1','F2','F3'],steps=7,df=False): vals = {} norms = {} rowlist = [] st=cloud[cloud['vowel'] == start].sample().reset_index(drop=True) # randomly sample a row with the some start vowel spkr = st.speaker.iloc[0] # Get the speaker and gender gen = st.gender.iloc[0] en=pb52[(cloud.speaker==spkr) & (cloud['vowel']==end)].sample().reset_index(drop=True) # get a row with the end vowel from same speaker for dim in dimlist: # Calculate the difference between start and end for each dim norms[dim] = en[dim] - st[dim] for i in range (0,steps): for dim in dimlist: vals[dim] = st[dim] + (norms[dim] * i/(steps-1)) # the values for each dim = start val + diff by step row = pd.DataFrame(vals) row['vowel'] = '?' 
row['speaker'] = spkr row['gender'] = gen rowlist.append(row) rowlist[0]['vowel'] = start # Change start and end vowels rowlist[-1]['vowel'] = end cont=pd.concat(rowlist,ignore_index=True) # concatenate if df == True: return cont else: return rowlist def FCN(exemplars,start,end): exemplars['N'] = np.where((exemplars['vowel']==start)|(exemplars['vowel']==end), 1, 0) return exemplars ## Include other speaker characteristics def choosecontinuum(cont,cloud,cats,start,end,FC=False): exemplars=cloud.copy() exemplars=exemplars[~exemplars.isin(cont)].dropna() choicelist = [] for row in cont: test=row if FC == True: FCN(exemplars,start,end) else: reset_N(exemplars) bigdf=activation(test,exemplars) pr=probs(bigdf,cats) choices=choose(pr,test,cats) choicelist.append(choices) choices=pd.concat(choicelist,ignore_index=True) return choices def resonate(pr,rescats,cloud,beta=0.25,gamma=0.25): #beta slows influence of first guess down, gives chance to revise guess exemplars = cloud.copy() for cat in rescats: probval = exemplars[cat].map(pr[cat].set_index(cat).to_dict()['probability']) # change N to existing N + some prop of probability exemplars['N'] = exemplars['N'] + (beta * probval) - (gamma * (1-probval)) return exemplars def multirescat(testset,dims,cloud,cats,c,ncycles,rescats,beta=0.25,gamma=0.25): choicelist=[] for ix in list(testset.index.values): #set the test and exemplar cloud for the trial test = testset.loc[[ix,]] exemplars=cloud[~cloud.isin(test)].dropna() reset_N(exemplars) #do the first categorization bigdf=activation(test,exemplars,dims = dims,c=c) pr=probs(bigdf,cats) #Start the resonance loop for cycle in range(0,ncycles): exemplars=resonate(pr,rescats,exemplars,beta=beta,gamma=gamma) bigdf=activation(test,exemplars,dims=dims,c=c) pr=probs(bigdf,cats) #Make a final categorization for that trial choices = choose(pr,test,cats) choicelist.append(choices) choices=pd.concat(choicelist, ignore_index=True) return choices def rescat(test,exemplars,dims,cats,c,n_res,rescats,beta=0.25,gamma=0.25): reset_N(exemplars) for i in range(0,n_res): act = activation(test,exemplars,dims=dims,c=c) prob = probs (act,cats) exemplars = resonate(prob,rescats,exemplars,beta=beta,gamma=gamma) choices = choose(prob,test,cats) return choices def multiaccplot(choices,cats): accuracy = checkaccuracy(choices,cats) for cat in cats: proportion = propcorr(accuracy,cat) accplot(proportion,cat,accuracy) print(proportion) def errorfunc(x, cloud,testset,dimslist,catslist): #x = [c,F0,F1,F2,F3] c=x[0] dimsdict={dimslist[0]:x[1],dimslist[1]:x[2],dimslist[2]:x[3],dimslist[3]:x[4]} choices=multicat(cloud,testset,catslist,dims=dimsdict,c=c) accuracy=checkaccuracy(choices,catslist) err = accuracy['vowel'+'Acc'].value_counts(normalize=True)['n'] return err def errorfunc_c(x, cloud,testset,dimsdict,catslist): c=x[0] choices=multicat(cloud,testset,catslist,dims=dimsdict,c=c) accuracy=checkaccuracy(choices,catslist) err = accuracy['vowel'+'Acc'].value_counts(normalize=True)['n'] return err def errorfunc_anchor(x, cloud,testset,dimslist,catslist): #x = [c,F1,F2,F3] c=x[0] dimsdict={dimslist[0]:1,dimslist[1]:x[1],dimslist[2]:x[2],dimslist[3]:x[3]} choices=multicat(cloud,testset,catslist,dims=dimsdict,c=c) accuracy=checkaccuracy(choices,catslist) err = accuracy['vowel'+'Acc'].value_counts(normalize=True)['n'] return err def confusion(choices,cats): matrices={} for cat in cats: matrices[cat]=pd.crosstab(choices[cat],choices[cat+'Choice'],normalize='index').round(2).rename_axis(None) return matrices def evalcycles(dictname,cats): 
accdict={} overallaccdict={} pcdict={} cmdict={} for dx in bgch.keys(): name='res'+str(dx) accdict[name]=checkaccuracy(bgch[dx],['vowel','type']) overallaccdict[name]=overallacc(accdict[name],'vowel') pcdict[name]=propcorr(accdict[name],'vowel') ``` ## data ``` pb52=pd.read_csv('pb52.csv') pbbark=barkbark(pb52,['F0','F1','F2','F3']) choices52=pd.read_csv('pb52choices.csv') ch={} for h in range(0,10): name='res'+str(h)+'cyc.csv' ch[h] = pd.read_csv(name) pbcm = pd.read_csv('pbcm.csv').drop([0]).set_index('vowelChoice').rename_axis(None) ``` # For paper ``` # Set parameters cval=55 dimsvals={'z0':1,'z1':2.953,'z2':.924,'z3':3.420} catslist=['vowel','type'] pbtest=pbbark.copy() rescats=['type'] subset=gettestset(pbbark,'vowel',50) testers= gettestset(pbbark,'vowel',5) choices = multicat(cloud=pbbark,testset=testers,cats=catslist,dims = dimsvals,c=cval) choices propcorr(checkaccuracy(choices,catslist),'vowel') ``` ### Resonance ``` a={} for h in range(0,3): a[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=cval,ncycles=h,rescats=rescats,beta=1,gamma=1) b={} for h in range(0,3): b[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=10,ncycles=h,rescats=rescats,beta=1,gamma=1) c={} for h in range(0,3): c[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=cval,ncycles=h,rescats=['type','vowel'],beta=1,gamma=1) d={} for h in range(0,3): d[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=cval,ncycles=h,rescats=rescats,beta=0,gamma=1) e={} for h in range(0,3): e[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=cval,ncycles=h,rescats=rescats,beta=1,gamma=0.5) f={} for h in range(0,3): f[h]=multirescat(testset=subset,dims=dimsvals,cloud=pbbark,cats=catslist,c=1,ncycles=h,rescats=rescats,beta=1,gamma=0.5) accdict_f={} pcdict_f={} for dx in f.keys(): name='res'+str(dx) accdict_f[name]=checkaccuracy(f[dx],['vowel','type']) pcdict_f[name]=propcorr(accdict_f[name],'vowel') accdict_a={} pcdict_a={} for dx in a.keys(): name='res'+str(dx) accdict_a[name]=checkaccuracy(a[dx],['vowel','type']) pcdict_a[name]=propcorr(accdict_a[name],'vowel') accdict_b={} pcdict_b={} for dx in b.keys(): name='res'+str(dx) accdict_b[name]=checkaccuracy(b[dx],['vowel','type']) pcdict_b[name]=propcorr(accdict_b[name],'vowel') accdict_c={} pcdict_c={} for dx in c.keys(): name='res'+str(dx) accdict_c[name]=checkaccuracy(c[dx],['vowel','type']) pcdict_c[name]=propcorr(accdict_c[name],'vowel') accdict_d={} pcdict_d={} for dx in d.keys(): name='res'+str(dx) accdict_d[name]=checkaccuracy(d[dx],['vowel','type']) pcdict_d[name]=propcorr(accdict_d[name],'vowel') serieslist=[] for dx in a.keys(): name='res'+str(dx) series=pd.Series(pcdict_a[name]['propcorr'],name=name) serieslist.append(series) pcres_a = pd.concat(serieslist,axis=1) serieslist=[] for dx in b.keys(): name='res'+str(dx) series=pd.Series(pcdict_b[name]['propcorr'],name=name) serieslist.append(series) pcres_b = pd.concat(serieslist,axis=1) serieslist=[] for dx in c.keys(): name='res'+str(dx) series=pd.Series(pcdict_c[name]['propcorr'],name=name) serieslist.append(series) pcres_c = pd.concat(serieslist,axis=1) serieslist=[] for dx in d.keys(): name='res'+str(dx) series=pd.Series(pcdict_d[name]['propcorr'],name=name) serieslist.append(series) pcres_d = pd.concat(serieslist,axis=1) serieslist=[] for dx in f.keys(): name='res'+str(dx) series=pd.Series(pcdict_f[name]['propcorr'],name=name) serieslist.append(series) pcres_f = pd.concat(serieslist,axis=1) pcres_f ``` #### 
idk ``` #save each dict for dx in res5.keys(): name=str(dx)+'_res5.csv' res5[dx].to_csv(name) ###How to make function??? accdict_res4={} overallaccdict_res4={} pcdict_res4={} cmdict={} for dx in res4.keys(): name='res'+str(dx) accdict_res4[name]=checkaccuracy(res4[dx],['vowel','type']) overallaccdict_res4[name]=overallacc(accdict[name],'vowel') pcdict_res4[name]=propcorr(accdict[name],'vowel') pcdict serieslist=[] for dx in res3.keys(): name='res'+str(dx) series=pd.Series(pcdict_res3[name]['propcorr'],name=name) serieslist.append(series) pcres = pd.concat(serieslist,axis=1) print(pcres) serieslist=[] for dx in res2.keys(): name='res'+str(dx) series=pd.Series(pcdict_res2[name]['propcorr'],name=name) serieslist.append(series) pcres_res2 = pd.concat(serieslist,axis=1) print(pcres_res2) serieslist=[] for dx in res4.keys(): name='res'+str(dx) series=pd.Series(pcdict_res4[name]['propcorr'],name=name) serieslist.append(series) pcres_res4 = pd.concat(serieslist,axis=1) print(pcres_res4) ``` ## Categorize all data and check accuracy ``` confs = confusion(choices52,catslist) modelcm = confs['vowel'] pbcmfl=pd.Series(pbcm.stack(),name="PB") mcmfl=pd.Series(modelcm.stack(),name="GCM") cms cms=pd.concat([pbcmfl,mcmfl],axis=1) (((cms.PB-cms.GCM)**2).mean())**.5 correlation = cms['PB'].corr(cms['GCM']) correlation pb52_acc=checkaccuracy(choices52,catslist) pb52_pc=propcorr(pb52_acc,'vowel') pb52_pc accplot(pb52_pc, 'vowel', pb52_acc) overallacc(pb52_acc,'vowel') ``` ## Try different numbers of resonance? ``` accdict={} overallaccdict={} pcdict={} cmdict={} for dx in bgch.keys(): name='res'+str(dx) accdict[name]=checkaccuracy(bgch[dx],['vowel','type']) overallaccdict[name]=overallacc(accdict[name],'vowel') pcdict[name]=propcorr(accdict[name],'vowel') bgch={} for h in range(0,5): bgch[h]=multirescat(pbtest,dimsvals,pbbark,catslist,cval,h,beta=0.5,gamma=0.5) for dx in bgch.keys(): name=str(dx)+'wGamma_cyc.csv' bgch[dx].to_csv(name) ch[9]['vowelProb']-ch[8]['vowelProb'] serieslist=[] for dx in ch.keys(): name='res'+str(dx) series=pd.Series(cmdict[name]['vowel'].stack(),name=name) serieslist.append(series) rescm = pd.concat(serieslist,axis=1) rmsedict={} for dx in ch.keys(): name='res'+str(dx) rmsedict[name]=(((rescm.res0-rescm[name])**2).mean())**.5 rmsedict serieslist=[] for dx in bgch.keys(): name='res'+str(dx) series=pd.Series(pcdict[name]['propcorr'],name=name) serieslist.append(series) pcres = pd.concat(serieslist,axis=1) pcresdict={} for dx in ch.keys(): name='res'+str(dx) pcresdict[name]=(((pcres.res0-pcres[name])**2).mean())**.5 pcres pcres for q in pcdict.keys(): accplot(pcdict[q],'vowel',accdict[q]) accdict for dx in ch.keys(): name='res'+str(dx)+'cyc.csv' ch[dx].to_csv(name) ch.to_csv("res_cycles_1.csv") ``` ## scratch ``` continuum ('TRAP','PALM',pbbark,['F0','F1','F2','F3'],steps=7,df=True) ``` # Parameter fitting
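The error functions above are written to be minimized with `scipy.optimize.minimize` (imported in the setup cell). Below is a minimal, hypothetical sketch of such a fit; the starting values, the `Nelder-Mead` method, and the use of `testers` as the evaluation set are illustrative assumptions, not settings taken from the analysis above.

```
# Hypothetical parameter fit: minimize the vowel error rate over c and the four
# dimension weights. Starting point and optimizer choice are for illustration only.
x0 = [50, 1, 1, 1, 1]                      # [c, weight_z0, weight_z1, weight_z2, weight_z3]
dimslist = ['z0', 'z1', 'z2', 'z3']

fit = minimize(errorfunc, x0,
               args=(pbbark, testers, dimslist, catslist),
               method='Nelder-Mead')       # derivative-free, since the error rate is a step-like function
print(fit.x, fit.fun)
```

Note that `errorfunc` looks up `value_counts(normalize=True)['n']`, so a parameter setting that classifies every test token correctly would raise a `KeyError`; a more robust version would fall back to an error of 0 in that case.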
```
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score, calinski_harabasz_score
from functools import partial
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
```

## Loading the data

```
df = pd.read_csv("../../clustering.csv", header=None, names=['x', 'y'])
```

## Basic information about the dataset

```
df.info()

df.describe()

sns.scatterplot(x=df['x'], y=df['y'])
```

The dataset is clearly split into several groups, but it is hard to say unambiguously how many.

## Standardizing the variables

```
scaler = StandardScaler()
df[df.columns] = scaler.fit_transform(df[df.columns])
df.describe()
```

# KMeans

As the first method I decided to use KMeans. To choose a suitable k, I will use the elbow method.

```
def plot_score_kmeans(df, metod, title):
    scors = []
    for k in range(2, 20):
        met = metod(k)
        met.fit(df)
        scors.append(met.inertia_)
    fig = plt.figure(figsize=(15, 5))
    plt.plot(range(2, 20), scors, marker='h')
    plt.xticks(np.arange(1, 21, 2))
    plt.grid(True)
    plt.title(title)

model = partial(KMeans)
plot_score_kmeans(df, model, title='Elbow curve')
```

It is hard to choose the right k unambiguously from this plot. I will check k equal to 7, 8 and 9.

```
def plot(df, model):
    data = df.copy()
    label = model.fit_predict(data)
    data['label'] = label
    sns.lmplot(data=data, x='x', y='y', hue='label', fit_reg=False)

model = KMeans(n_clusters=7)
plot(df, model)

model = KMeans(n_clusters=8)
plot(df, model)

model = KMeans(n_clusters=9)
plot(df, model)
```

All of these look fairly reasonable; k=8 splits the data comparably to k=9, and both do so clearly better than k=7, where the cluster at the top appears to be artificially merged into one.

# Gaussian Mixture

The second algorithm I will use is Gaussian Mixture, and I will find a suitable number of clusters for it with the silhouette method.

```
def plot_score_gauss(df, metod, title):
    scors = []
    for k in range(2, 20):
        met = metod(k)
        label = met.fit_predict(df)
        scors.append(silhouette_score(df, label))
    fig = plt.figure(figsize=(15, 5))
    plt.plot(range(2, 20), scors, marker='h')
    plt.xticks(np.arange(1, 21, 2))
    plt.grid(True)
    plt.title(title)

model = partial(GaussianMixture)
plot_score_gauss(df, model, title='Gaussian silhouette')
```

This time it is clear that the best score is obtained for k=9.

```
db = GaussianMixture(n_components=9)
plot(df, db)
```

The result differs quite noticeably from the KMeans one, especially for the cluster in the middle and the one at the top. Since the data has no labels, it is hard to say which algorithm did better, but my impression is that KMeans handled the task better.

## Comparison

I will compare the partitions produced by the two algorithms using a measure known as the Variance Ratio Criterion (the Calinski-Harabasz score). The higher this score, the better defined the clusters.
```
kmeans_model8 = KMeans(n_clusters=8).fit(df)
kmeans_model9 = KMeans(n_clusters=9).fit(df)
labels8 = kmeans_model8.labels_
labels9 = kmeans_model9.labels_
labels8g = GaussianMixture(8).fit_predict(df)
labels9g = GaussianMixture(9).fit_predict(df)

print(f'Calinski-Harabasz score for KMeans with k=8: {calinski_harabasz_score(df, labels8)}')
print(f'Calinski-Harabasz score for KMeans with k=9: {calinski_harabasz_score(df, labels9)}')
print(f'Calinski-Harabasz score for Gaussian Mixture with k=8: {calinski_harabasz_score(df, labels8g)}')
print(f'Calinski-Harabasz score for Gaussian Mixture with k=9: {calinski_harabasz_score(df, labels9g)}')
```

Both methods perform well, but KMeans with k=9 obtained the better score according to this measure, so it can be declared the winner of this comparison.
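As an optional, additional sanity check (not part of the original comparison), the silhouette score imported above can be computed for the same four labelings; for this measure too, higher is better. This is only a sketch reusing the variables from the cell above.

```
# Silhouette score for each of the four partitions computed above (higher is better)
for name, labs in [('KMeans k=8', labels8), ('KMeans k=9', labels9),
                   ('Gaussian Mixture k=8', labels8g), ('Gaussian Mixture k=9', labels9g)]:
    print(f'Silhouette score for {name}: {silhouette_score(df, labs):.3f}')
```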
```
# Add needed imports
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydot
import shap

from imblearn.over_sampling import SMOTENC
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Dense, Activation, Dropout
from tensorflow.keras.models import Sequential, Model
from keras.utils import plot_model
from tensorflow.compat.v1.keras.backend import get_session

# Disable TF2 behavior (needed here for shap.DeepExplainer compatibility)
tf.compat.v1.disable_v2_behavior()

# Read data
processed_data_path = os.path.join(os.path.pardir, os.path.pardir, 'data', 'processed')
train_path = os.path.join(processed_data_path, 'dataset7.csv')
df = pd.read_csv(train_path)

labels = df['Churn']
x = df.drop(columns=['Churn', 'Unnamed: 0'], axis='columns')
y = np.ravel(labels)

# Oversample with SMOTE-NC (columns 9-12 are treated as categorical), then scale
oversample = SMOTENC(categorical_features=[9, 10, 11, 12])
x, y = oversample.fit_resample(x, y)
sc = RobustScaler()
x = pd.DataFrame(sc.fit_transform(x), columns=x.columns)

ii = 1

xgb_model = XGBClassifier(random_state=1, learning_rate=0.05, max_depth=7, eval_metric='mlogloss',
                          use_label_encoder=False, objective="binary:logistic")
dt_model = DecisionTreeClassifier(random_state=1, criterion='entropy', max_depth=7, min_samples_leaf=30)

nn_model = Sequential()
nn_model.add(Dense(64, kernel_regularizer=tf.keras.regularizers.l2(0.001), input_dim=13, activation='relu'))
nn_model.add(Dropout(rate=0.2))
nn_model.add(Dense(8, kernel_regularizer=tf.keras.regularizers.l2(0.001), activation='relu'))
nn_model.add(Dropout(rate=0.1))
nn_model.add(Dense(1, activation='sigmoid'))

callback = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=70, restore_best_weights=True)

cv = StratifiedKFold(n_splits=10)
for train_index, test_index in cv.split(x, y):
    print("Iteration", ii)

    # XGBoost: fit on the training fold, explain the held-out fold
    Xi_train, Xi_test = x.loc[train_index], x.loc[test_index]
    yi_train, yi_test = y[train_index], y[test_index]
    xgb_model.fit(Xi_train, yi_train)
    shap_values = shap.TreeExplainer(xgb_model).shap_values(Xi_test, approximate=True)
    #shap.summary_plot(shap_values, Xi_test, plot_type="bar")
    vals = np.abs(shap_values).mean(axis=0)
    print(pd.DataFrame(list(zip(vals)), columns=['XGB - feature_importance_vals']).to_string(index=False))

    # Decision tree
    Xi_train, Xi_test = x.loc[train_index], x.loc[test_index]
    yi_train, yi_test = y[train_index], y[test_index]
    dt_model.fit(Xi_train, yi_train)
    explainer = shap.TreeExplainer(dt_model)
    shap_values = explainer.shap_values(Xi_test)
    #shap.summary_plot(shap_values[1], Xi_test, plot_type="bar")
    vals = np.abs(shap_values[1]).mean(axis=0)
    print(pd.DataFrame(list(zip(vals)), columns=['DT - feature_importance_vals']).to_string(index=False))

    # Neural network
    lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
        0.001, decay_steps=(Xi_train.shape[0] / 32) * 50, decay_rate=1, staircase=False)
    nn_model.compile(loss="binary_crossentropy",
                     optimizer=tf.keras.optimizers.Adam(lr_schedule),
                     metrics=['accuracy'])
    nn_model.fit(Xi_train, yi_train, validation_data=(Xi_test, yi_test), epochs=150,
                 batch_size=10, verbose=0, callbacks=[callback])
    explainer = shap.DeepExplainer(nn_model, Xi_train[1:100].to_numpy())
    shap_values = explainer.shap_values(Xi_test[1:20].to_numpy())
    vals = np.abs(shap_values[0]).mean(axis=0)
    print(pd.DataFrame(list(zip(vals)), columns=['NN - feature_importance_vals']).to_string(index=False))

    ii += 1
```
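The importance tables printed above contain only the mean |SHAP| values, without the corresponding feature names. A small, hypothetical helper like the one below (the name and usage are illustrative, not part of the original notebook) could make the per-fold output easier to read:

```
# Hypothetical helper: pair mean |SHAP| values with the feature names of x
def shap_importance_table(vals, columns, label):
    return (pd.DataFrame({'feature': columns, f'{label} mean |SHAP|': vals})
              .sort_values(f'{label} mean |SHAP|', ascending=False))

# Example usage inside the loop above, e.g. for the XGBoost values:
# print(shap_importance_table(vals, Xi_test.columns, 'XGB').to_string(index=False))
```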
&emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&ensp; [Home Page](../START_HERE.ipynb) [Previous Notebook](01-Intro_to_Dask.ipynb) &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; [1](01-Intro_to_Dask.ipynb) [2] [3](03-CuML_and_Dask.ipynb) [4](04-Challenge.ipynb) [5](05-Challenge_Solution.ipynb) &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; [Next Notebook](03-CuML_and_Dask.ipynb) # Introduction to cuDF and Dask-cuDF ======================= Modeled after 10 Minutes to Pandas, this is a short introduction to cuDF and Dask-cuDF, geared mainly for new users. The tutorial is split into modules with embedded exercises for you to practice the concepts. All the concepts have both CuDF and Dask-CuDF syntax for enhanced understanding. ### What are these Libraries? [cuDF](https://github.com/rapidsai/cudf) is a Python GPU DataFrame library (built on the Apache Arrow columnar memory format) for loading, joining, aggregating, filtering, and otherwise manipulating data. [Dask](https://dask.org/) is a flexible library for parallel computing in Python that makes scaling out your workflow smooth and simple. [Dask-cuDF](https://github.com/rapidsai/dask-cudf) is a library that provides a partitioned, GPU-backed dataframe, using Dask. ### When to use cuDF and Dask-cuDF If your workflow is fast enough on a single GPU or your data comfortably fits in memory on a single GPU, you would want to use cuDF. If you want to distribute your workflow across multiple GPUs, have more data than you can fit in memory on a single GPU, or want to analyze data spread across many files at once, you would want to use Dask-cuDF. ## Here is the list of contents in the lab: - <a href='#objcreation'>Creating Dask-CuDF Objects</a><br> This module shows you how to work with Dask-CuDF dataframes, the distributed GPU equivalent of Pandas dataframes, for faster data transactions. It includes creating Dask-CuDF objects, viewing data, selecting data, boolean indexing and dealing with missing data. - <a href='#operation'>Operations</a><br> Learn how to view descriptive statistics, perform string operations, histogramming, concatenate, joins, append, group data and use applymap. - <a href='#time'>TimeSeries</a><br> Introduction to using TimeSeries data format in Dask-CuDF - <a href='#cat'>Categoricals</a><br> Introduction to using categorical data in Dask-CuDF - <a href='#condatarep'>Converting Data Representations</a><br> Here we will work with converting data representations, including Arrow, Pandas and Numpy, that are commonly required in data science pipelines. - <a href='#datainout'>Getting Data In and Out</a><br> Transfering Dask-CuDf dataframes to and from CSV and Parquet files. ``` import os import numpy as np import pandas as pd import cudf import dask_cudf np.random.seed(12) #### Portions of this were borrowed and adapted from the #### cuDF cheatsheet, existing cuDF documentation, #### and 10 Minutes to Pandas. ``` <a id='objcreation'></a> Object Creation --------------- Creating a `cudf.Series` and `dask_cudf.Series`. ``` s = cudf.Series([1,2,3,None,4]) print(s) ds = dask_cudf.from_cudf(s, npartitions=2) print(ds.compute()) ``` Creating a `cudf.DataFrame` and a `dask_cudf.DataFrame` by specifying values for each column. 
``` df = cudf.DataFrame({'a': list(range(20)), 'b': list(reversed(range(20))), 'c': list(range(20)) }) ddf = dask_cudf.from_cudf(df, npartitions=2) print(ddf.compute()) ``` Creating a `cudf.DataFrame` from a pandas `Dataframe` and a `dask_cudf.Dataframe` from a `cudf.Dataframe`. *Note that best practice for using Dask-cuDF is to read data directly into a `dask_cudf.DataFrame` with something like `read_csv` (discussed below).* ``` pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]}) gdf = cudf.DataFrame.from_pandas(pdf) print(gdf) dask_df = dask_cudf.from_cudf(pdf, npartitions=2) dask_gdf = dask_cudf.from_dask_dataframe(dask_df) print(dask_gdf.compute()) ``` <a id='viewing'></a><br> Viewing Data ------------- Viewing the top rows of a GPU dataframe. ``` print(df.head(2)) print(ddf.head(2)) ``` Sorting by values. ``` print(df.sort_values(by='b')) print(ddf.sort_values(by='b').compute()) ``` <a id='selection'></a> Selection ------------ ## Getting Selecting a single column, which initially yields a `cudf.Series` or `dask_cudf.Series`. Calling `compute` results in a `cudf.Series` (equivalent to `df.a`). ``` print(df['a']) print(ddf['a'].compute()) ``` <a id='sellabel'></a> ## Selection by Label Selecting rows from index 2 to index 5 from columns 'a' and 'b'. ``` print(df.loc[2:5, ['a', 'b']]) print(ddf.loc[2:5, ['a', 'b']].compute()) ``` <a id='selpos'></a> ## Selection by Position Selecting via integers and integer slices, like numpy/pandas. Note that this functionality is not available for Dask-cuDF DataFrames. ``` print(df.iloc[0]) print(df.iloc[0:3, 0:2]) ``` You can also select elements of a `DataFrame` or `Series` with direct index access. ``` print(df[3:5]) print(s[3:5]) ``` <a id='boolean'></a> ## Boolean Indexing Selecting rows in a `DataFrame` or `Series` by direct Boolean indexing. ``` print(df[df.b > 15]) print(ddf[ddf.b > 15].compute()) ``` Selecting values from a `DataFrame` where a Boolean condition is met, via the `query` API. ``` print(df.query("b == 3")) print(ddf.query("b == 3").compute()) ``` You can also pass local variables to Dask-cuDF queries, via the `local_dict` keyword. With standard cuDF, you may either use the `local_dict` keyword or directly pass the variable via the `@` keyword. ``` cudf_comparator = 3 print(df.query("b == @cudf_comparator")) dask_cudf_comparator = 3 print(ddf.query("b == @val", local_dict={'val':dask_cudf_comparator}).compute()) ``` Supported logical operators include `>`, `<`, `>=`, `<=`, `==`, and `!=`. <a id='multi'></a><br> ## MultiIndex cuDF supports hierarchical indexing of DataFrames using MultiIndex. Grouping hierarchically (see `Grouping` below) automatically produces a DataFrame with a MultiIndex. ``` arrays = [['a', 'a', 'b', 'b'], [1, 2, 3, 4]] tuples = list(zip(*arrays)) idx = cudf.MultiIndex.from_tuples(tuples) idx ``` This index can back either axis of a DataFrame. ``` gdf1 = cudf.DataFrame({'first': np.random.rand(4), 'second': np.random.rand(4)}) gdf1.index = idx print(gdf1.to_pandas()) gdf2 = cudf.DataFrame({'first': np.random.rand(4), 'second': np.random.rand(4)}).T gdf2.columns = idx print(gdf2.to_pandas()) ``` Accessing values of a DataFrame with a MultiIndex. Note that slicing is not yet supported. ``` print(gdf1.loc[('b', 3)].to_pandas()) ``` <a id='missing'></a><br> Missing Data ------------ Missing data can be replaced by using the `fillna` method. 
``` print(s.fillna(999)) print(ds.fillna(999).compute()) ``` <a id='operation'></a><br> Operations ------------ <a id='stats'></a><br> ## Stats Calculating descriptive statistics for a `Series`. ``` print(s.mean(), s.var()) print(ds.mean().compute(), ds.var().compute()) ``` <a id='applymap'></a><br> ## Applymap Applying functions to a `Series`. Note that applying user defined functions directly with Dask-cuDF is not yet implemented. For now, you can use [map_partitions](http://docs.dask.org/en/stable/dataframe-api.html#dask.dataframe.DataFrame.map_partitions) to apply a function to each partition of the distributed dataframe. ``` def add_ten(num): return num + 10 print(df['a'].applymap(add_ten)) print(ddf['a'].map_partitions(add_ten).compute()) ``` <a id='histo'></a> ## Histogramming Counting the number of occurrences of each unique value of variable. ``` print(df.a.value_counts()) print(ddf.a.value_counts().compute()) ``` <a id='string'></a><br> ## String Methods Like pandas, cuDF provides string processing methods in the `str` attribute of `Series`. Full documentation of string methods is a work in progress. Please see the cuDF API documentation for more information. ``` s = cudf.Series(['A', 'B', 'C', 'Aaba', 'Baca', None, 'CABA', 'dog', 'cat']) print(s.str.lower()) ds = dask_cudf.from_cudf(s, npartitions=2) print(ds.str.lower().compute()) ``` <a id='concat'></a><br> ## Concat Concatenating `Series` and `DataFrames` row-wise. ``` s = cudf.Series([1, 2, 3, None, 5]) print(cudf.concat([s, s])) ds2 = dask_cudf.from_cudf(s, npartitions=2) print(dask_cudf.concat([ds2, ds2]).compute()) ``` <a id='join'></a><br> ## Join Performing SQL style merges. Note that the dataframe order is not maintained, but may be restored post-merge by sorting by the index. ``` df_a = cudf.DataFrame() df_a['key'] = ['a', 'b', 'c', 'd', 'e'] df_a['vals_a'] = [float(i + 10) for i in range(5)] df_b = cudf.DataFrame() df_b['key'] = ['a', 'c', 'e'] df_b['vals_b'] = [float(i+100) for i in range(3)] merged = df_a.merge(df_b, on=['key'], how='left') print(merged) ddf_a = dask_cudf.from_cudf(df_a, npartitions=2) ddf_b = dask_cudf.from_cudf(df_b, npartitions=2) merged = ddf_a.merge(ddf_b, on=['key'], how='left').compute() print(merged) ``` <a id='append'></a><br> ## Append Appending values from another `Series` or array-like object. ``` print(s.append(s)) print(ds2.append(ds2).compute()) ``` <a id='grouping'></a><br> ## Grouping Like pandas, cuDF and Dask-cuDF support the Split-Apply-Combine groupby paradigm. ``` df['agg_col1'] = [1 if x % 2 == 0 else 0 for x in range(len(df))] df['agg_col2'] = [1 if x % 3 == 0 else 0 for x in range(len(df))] ddf = dask_cudf.from_cudf(df, npartitions=2) ``` Grouping and then applying the `sum` function to the grouped data. ``` print(df.groupby('agg_col1').sum()) print(ddf.groupby('agg_col1').sum().compute()) ``` Grouping hierarchically then applying the `sum` function to grouped data. We send the result to a pandas dataframe only for printing purposes. ``` print(df.groupby(['agg_col1', 'agg_col2']).sum().to_pandas()) ddf.groupby(['agg_col1', 'agg_col2']).sum().compute().to_pandas() ``` Grouping and applying statistical functions to specific columns, using `agg`. ``` print(df.groupby('agg_col1').agg({'a':'max', 'b':'mean', 'c':'sum'})) print(ddf.groupby('agg_col1').agg({'a':'max', 'b':'mean', 'c':'sum'}).compute()) ``` <a id='tran'></a><br> ## Transpose Transposing a dataframe, using either the `transpose` method or `T` property. Currently, all columns must have the same type. 
Transposing is not currently implemented in Dask-cuDF. ``` sample = cudf.DataFrame({'a':[1,2,3], 'b':[4,5,6]}) print(sample) print(sample.transpose()) ``` <a id='time'></a><br> Time Series ------------ `DataFrames` supports `datetime` typed columns, which allow users to interact with and filter data based on specific timestamps. ``` import datetime as dt date_df = cudf.DataFrame() date_df['date'] = pd.date_range('11/20/2018', periods=72, freq='D') date_df['value'] = np.random.sample(len(date_df)) search_date = dt.datetime.strptime('2018-11-23', '%Y-%m-%d') print(date_df.query('date <= @search_date')) date_ddf = dask_cudf.from_cudf(date_df, npartitions=2) print(date_ddf.query('date <= @search_date', local_dict={'search_date':search_date}).compute()) ``` <a id='cat'></a><br> Categoricals ------------ `DataFrames` support categorical columns. ``` pdf = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']}) pdf["grade"] = pdf["grade"].astype("category") gdf = cudf.DataFrame.from_pandas(pdf) print(gdf) dgdf = dask_cudf.from_cudf(gdf, npartitions=2) print(dgdf.compute()) ``` Accessing the categories of a column. Note that this is currently not supported in Dask-cuDF. ``` gdf.grade.cat.categories ``` Accessing the underlying code values of each categorical observation. ``` print(gdf.grade.cat.codes) print(dgdf.grade.cat.codes.compute()) ``` <a id='condatarep'></a><br> Converting Data Representation -------------------------------- <a id='pandas'></a><br> ## Pandas Converting a cuDF and Dask-cuDF `DataFrame` to a pandas `DataFrame`. ``` print(df.head().to_pandas()) print(ddf.compute().head().to_pandas()) ``` <a id='numpy'></a><br> ## Numpy Converting a cuDF or Dask-cuDF `DataFrame` to a numpy `ndarray`. ``` print(df.as_matrix()) print(ddf.compute().as_matrix()) ``` Converting a cuDF or Dask-cuDF `Series` to a numpy `ndarray`. ``` print(df['a'].to_array()) print(ddf['a'].compute().to_array()) ``` <a id='arrow'></a><br> ## Arrow Converting a cuDF or Dask-cuDF `DataFrame` to a PyArrow `Table`. ``` print(df.to_arrow()) print(ddf.compute().to_arrow()) ``` <a id='datainout'></a><br> Getting Data In/Out ------------------------ <a id='csv'></a><br> ## CSV Writing to a CSV file, by first sending data to a pandas `Dataframe` on the host. ``` if not os.path.exists('example_output'): os.mkdir('example_output') df.to_pandas().to_csv('example_output/foo.csv', index=False) ddf.compute().to_pandas().to_csv('example_output/foo_dask.csv', index=False) ``` Reading from a csv file. ``` df = cudf.read_csv('example_output/foo.csv') print(df) ddf = dask_cudf.read_csv('example_output/foo_dask.csv') print(ddf.compute()) ``` Reading all CSV files in a directory into a single `dask_cudf.DataFrame`, using the star wildcard. ``` ddf = dask_cudf.read_csv('example_output/*.csv') print(ddf.compute()) ``` <a id='par'></a><br> ## Parquet Writing to parquet files, using the CPU via PyArrow. ``` df.to_parquet('example_output/temp_parquet') ``` Reading parquet files with a GPU-accelerated parquet reader. ``` df = cudf.read_parquet('example_output/temp_parquet') print(df.to_pandas()) ``` Writing to parquet files from a `dask_cudf.DataFrame` using PyArrow under the hood. ``` ddf.to_parquet('example_files') ``` # Conclusion Now we are familiar with creating Dask-CuDF dataframes, selecting, viewing and manipulating data. The operations are almost the same as pandas, and can easily replace the pandas operations in our traditional data science pipeline. 
While the results may vary slightly on different GPUs, it should be clear that distributed GPU acceleration can make a significant difference. We can get much faster results with the same code! The next tutorial will show you how to use CuML with Dask. This is very exciting as we can now boost our models with distributed GPU programming. ## Licensing This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0). [Previous Notebook](01-Intro_to_Dask.ipynb) &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; [1](01-Intro_to_Dask.ipynb) [2] [3](03-CuML_and_Dask.ipynb) [4](04-Challenge.ipynb) &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; [Next Notebook](03-CuML_and_Dask.ipynb) &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&emsp;&emsp;&emsp; &emsp;&emsp;&ensp; [Home Page](../START_HERE.ipynb)
github_jupyter
**4장 – 모델 훈련** _이 노트북은 4장에 있는 모든 샘플 코드와 연습문제 해답을 가지고 있습니다._ <table align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/rickiepark/handson-ml2/blob/master/04_training_linear_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩에서 실행하기</a> </td> </table> # 설정 먼저 몇 개의 모듈을 임포트합니다. 맷플롯립 그래프를 인라인으로 출력하도록 만들고 그림을 저장하는 함수를 준비합니다. 또한 파이썬 버전이 3.5 이상인지 확인합니다(파이썬 2.x에서도 동작하지만 곧 지원이 중단되므로 파이썬 3을 사용하는 것이 좋습니다). 사이킷런 버전이 0.20 이상인지도 확인합니다. ``` # 파이썬 ≥3.5 필수 import sys assert sys.version_info >= (3, 5) # 사이킷런 ≥0.20 필수 import sklearn assert sklearn.__version__ >= "0.20" # 공통 모듈 임포트 import numpy as np import os # 노트북 실행 결과를 동일하게 유지하기 위해 np.random.seed(42) # 깔끔한 그래프 출력을 위해 %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # 그림을 저장할 위치 PROJECT_ROOT_DIR = "." CHAPTER_ID = "training_linear_models" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("그림 저장:", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # 불필요한 경고를 무시합니다 (사이파이 이슈 #5998 참조) import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd") ``` # 정규 방정식을 사용한 선형 회귀 ``` import numpy as np X = 2 * np.random.rand(100, 1) y = 4 + 3 * X + np.random.randn(100, 1) plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.axis([0, 2, 0, 15]) save_fig("generated_data_plot") plt.show() ``` **식 4-4: 정규 방정식** $\hat{\boldsymbol{\theta}} = (\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y}$ ``` X_b = np.c_[np.ones((100, 1)), X] # 모든 샘플에 x0 = 1을 추가합니다. theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y) theta_best ``` $\hat{y} = \mathbf{X} \boldsymbol{\hat{\theta}}$ ``` X_new = np.array([[0], [2]]) X_new_b = np.c_[np.ones((2, 1)), X_new] # 모든 샘플에 x0 = 1을 추가합니다. y_predict = X_new_b.dot(theta_best) y_predict plt.plot(X_new, y_predict, "r-") plt.plot(X, y, "b.") plt.axis([0, 2, 0, 15]) plt.show() ``` 책에 있는 그림은 범례와 축 레이블이 있는 그래프입니다: ``` plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions") plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.legend(loc="upper left", fontsize=14) plt.axis([0, 2, 0, 15]) save_fig("linear_model_predictions_plot") plt.show() from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(X, y) lin_reg.intercept_, lin_reg.coef_ lin_reg.predict(X_new) ``` `LinearRegression` 클래스는 `scipy.linalg.lstsq()` 함수("least squares"의 약자)를 사용하므로 이 함수를 직접 사용할 수 있습니다: ``` # 싸이파이 lstsq() 함수를 사용하려면 scipy.linalg.lstsq(X_b, y)와 같이 씁니다. theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6) theta_best_svd ``` 이 함수는 $\mathbf{X}^+\mathbf{y}$을 계산합니다. $\mathbf{X}^{+}$는 $\mathbf{X}$의 _유사역행렬_ (pseudoinverse)입니다(Moore–Penrose 유사역행렬입니다). 
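As a quick aside (not part of the original notebook), here is a minimal sketch of what that pseudoinverse computation amounts to: the SVD of the design matrix is used to invert only the non-negligible singular values. It assumes `X_b` and `y` from the cells above; the names `U_svd`, `s_svd`, `Vt_svd`, `X_b_pinv`, and `theta_via_svd` are introduced here purely for illustration.

```
# Economy-size SVD of the design matrix: X_b = U * diag(s) * Vt
U_svd, s_svd, Vt_svd = np.linalg.svd(X_b, full_matrices=False)

# Invert only the singular values above a small tolerance
# (this is essentially what np.linalg.pinv() does internally)
s_inv = np.where(s_svd > 1e-10, 1.0 / s_svd, 0.0)
X_b_pinv = Vt_svd.T @ np.diag(s_inv) @ U_svd.T   # Moore-Penrose pseudoinverse

theta_via_svd = X_b_pinv @ y   # should match np.linalg.pinv(X_b).dot(y) below
theta_via_svd
```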
`np.linalg.pinv()` can be used to compute the pseudoinverse directly: $\boldsymbol{\hat{\theta}} = \mathbf{X}^{+}\mathbf{y}$ ``` np.linalg.pinv(X_b).dot(y) ``` # Linear Regression Using Batch Gradient Descent **Equation 4-6: Gradient vector of the cost function** $ \dfrac{\partial}{\partial \boldsymbol{\theta}} \text{MSE}(\boldsymbol{\theta}) = \dfrac{2}{m} \mathbf{X}^T (\mathbf{X} \boldsymbol{\theta} - \mathbf{y}) $ **Equation 4-7: Gradient descent step** $ \boldsymbol{\theta}^{(\text{next step})} = \boldsymbol{\theta} - \eta \dfrac{\partial}{\partial \boldsymbol{\theta}} \text{MSE}(\boldsymbol{\theta}) $ ``` eta = 0.1 # learning rate n_iterations = 1000 m = 100 theta = np.random.randn(2,1) # random initialization for iteration in range(n_iterations): gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y) theta = theta - eta * gradients theta X_new_b.dot(theta) theta_path_bgd = [] def plot_gradient_descent(theta, eta, theta_path=None): m = len(X_b) plt.plot(X, y, "b.") n_iterations = 1000 for iteration in range(n_iterations): if iteration < 10: y_predict = X_new_b.dot(theta) style = "b-" if iteration > 0 else "r--" plt.plot(X_new, y_predict, style) gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y) theta = theta - eta * gradients if theta_path is not None: theta_path.append(theta) plt.xlabel("$x_1$", fontsize=18) plt.axis([0, 2, 0, 15]) plt.title(r"$\eta = {}$".format(eta), fontsize=16) np.random.seed(42) theta = np.random.randn(2,1) # random initialization plt.figure(figsize=(10,4)) plt.subplot(131); plot_gradient_descent(theta, eta=0.02) plt.ylabel("$y$", rotation=0, fontsize=18) plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd) plt.subplot(133); plot_gradient_descent(theta, eta=0.5) save_fig("gradient_descent_plot") plt.show() ``` # Stochastic Gradient Descent ``` theta_path_sgd = [] m = len(X_b) np.random.seed(42) n_epochs = 50 t0, t1 = 5, 50 # learning schedule hyperparameters def learning_schedule(t): return t0 / (t + t1) theta = np.random.randn(2,1) # random initialization for epoch in range(n_epochs): for i in range(m): if epoch == 0 and i < 20: # not in the book y_predict = X_new_b.dot(theta) # not in the book style = "b-" if i > 0 else "r--" # not in the book plt.plot(X_new, y_predict, style) # not in the book random_index = np.random.randint(m) xi = X_b[random_index:random_index+1] yi = y[random_index:random_index+1] gradients = 2 * xi.T.dot(xi.dot(theta) - yi) eta = learning_schedule(epoch * m + i) theta = theta - eta * gradients theta_path_sgd.append(theta) # not in the book plt.plot(X, y, "b.") # not in the book plt.xlabel("$x_1$", fontsize=18) # not in the book plt.ylabel("$y$", rotation=0, fontsize=18) # not in the book plt.axis([0, 2, 0, 15]) # not in the book save_fig("sgd_plot") # not in the book plt.show() # not in the book theta from sklearn.linear_model import SGDRegressor sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1, random_state=42) sgd_reg.fit(X, y.ravel()) sgd_reg.intercept_, sgd_reg.coef_ ``` # Mini-batch Gradient Descent ``` theta_path_mgd = [] n_iterations = 50 minibatch_size = 20 np.random.seed(42) theta = np.random.randn(2,1) # random initialization t0, t1 = 200, 1000 def learning_schedule(t): return t0 / (t + t1) t = 0 for epoch in range(n_iterations): shuffled_indices = np.random.permutation(m) X_b_shuffled = X_b[shuffled_indices] y_shuffled = y[shuffled_indices] for i in range(0, m, minibatch_size): t += 1 xi = X_b_shuffled[i:i+minibatch_size] yi = y_shuffled[i:i+minibatch_size] gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi) eta = learning_schedule(t) theta = theta - eta * gradients theta_path_mgd.append(theta) theta theta_path_bgd = np.array(theta_path_bgd) theta_path_sgd = np.array(theta_path_sgd) theta_path_mgd = np.array(theta_path_mgd) plt.figure(figsize=(7,4))
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic") plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch") plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch") plt.legend(loc="upper left", fontsize=16) plt.xlabel(r"$\theta_0$", fontsize=20) plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0) plt.axis([2.5, 4.5, 2.3, 3.9]) save_fig("gradient_descent_paths_plot") plt.show() ``` # 다항 회귀 ``` import numpy as np import numpy.random as rnd np.random.seed(42) m = 100 X = 6 * np.random.rand(m, 1) - 3 y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1) plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.axis([-3, 3, 0, 10]) save_fig("quadratic_data_plot") plt.show() from sklearn.preprocessing import PolynomialFeatures poly_features = PolynomialFeatures(degree=2, include_bias=False) X_poly = poly_features.fit_transform(X) X[0] X_poly[0] lin_reg = LinearRegression() lin_reg.fit(X_poly, y) lin_reg.intercept_, lin_reg.coef_ X_new=np.linspace(-3, 3, 100).reshape(100, 1) X_new_poly = poly_features.transform(X_new) y_new = lin_reg.predict(X_new_poly) plt.plot(X, y, "b.") plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.legend(loc="upper left", fontsize=14) plt.axis([-3, 3, 0, 10]) save_fig("quadratic_predictions_plot") plt.show() from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)): polybig_features = PolynomialFeatures(degree=degree, include_bias=False) std_scaler = StandardScaler() lin_reg = LinearRegression() polynomial_regression = Pipeline([ ("poly_features", polybig_features), ("std_scaler", std_scaler), ("lin_reg", lin_reg), ]) polynomial_regression.fit(X, y) y_newbig = polynomial_regression.predict(X_new) plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width) plt.plot(X, y, "b.", linewidth=3) plt.legend(loc="upper left") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.axis([-3, 3, 0, 10]) save_fig("high_degree_polynomials_plot") plt.show() from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split def plot_learning_curves(model, X, y): X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10) train_errors, val_errors = [], [] for m in range(1, len(X_train)): model.fit(X_train[:m], y_train[:m]) y_train_predict = model.predict(X_train[:m]) y_val_predict = model.predict(X_val) train_errors.append(mean_squared_error(y_train[:m], y_train_predict)) val_errors.append(mean_squared_error(y_val, y_val_predict)) plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train") plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val") plt.legend(loc="upper right", fontsize=14) # 책에는 없음 plt.xlabel("Training set size", fontsize=14) # 책에는 없음 plt.ylabel("RMSE", fontsize=14) # 책에는 없음 lin_reg = LinearRegression() plot_learning_curves(lin_reg, X, y) plt.axis([0, 80, 0, 3]) # 책에는 없음 save_fig("underfitting_learning_curves_plot") # 책에는 없음 plt.show() # 책에는 없음 from sklearn.pipeline import Pipeline polynomial_regression = Pipeline([ ("poly_features", PolynomialFeatures(degree=10, include_bias=False)), ("lin_reg", LinearRegression()), ]) plot_learning_curves(polynomial_regression, X, y) plt.axis([0, 80, 0, 3]) # 책에는 없음 
save_fig("learning_curves_plot") # 책에는 없음 plt.show() # 책에는 없음 ``` # 규제가 있는 모델 ``` np.random.seed(42) m = 20 X = 3 * np.random.rand(m, 1) y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5 X_new = np.linspace(0, 3, 100).reshape(100, 1) ``` **식 4-8: 릿지 회귀의 비용 함수** $ J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + \alpha \dfrac{1}{2}\sum\limits_{i=1}^{n}{\theta_i}^2 $ ``` from sklearn.linear_model import Ridge ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42) ridge_reg.fit(X, y) ridge_reg.predict([[1.5]]) ridge_reg = Ridge(alpha=1, solver="sag", random_state=42) ridge_reg.fit(X, y) ridge_reg.predict([[1.5]]) from sklearn.linear_model import Ridge def plot_model(model_class, polynomial, alphas, **model_kargs): for alpha, style in zip(alphas, ("b-", "g--", "r:")): model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression() if polynomial: model = Pipeline([ ("poly_features", PolynomialFeatures(degree=10, include_bias=False)), ("std_scaler", StandardScaler()), ("regul_reg", model), ]) model.fit(X, y) y_new_regul = model.predict(X_new) lw = 2 if alpha > 0 else 1 plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha)) plt.plot(X, y, "b.", linewidth=3) plt.legend(loc="upper left", fontsize=15) plt.xlabel("$x_1$", fontsize=18) plt.axis([0, 3, 0, 4]) plt.figure(figsize=(8,4)) plt.subplot(121) plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42) plt.ylabel("$y$", rotation=0, fontsize=18) plt.subplot(122) plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42) save_fig("ridge_regression_plot") plt.show() ``` **노트**: 향후 버전이 바뀌더라도 동일한 결과를 만들기 위해 사이킷런 0.21 버전의 기본값인 `max_iter=1000`과 `tol=1e-3`으로 지정합니다. ``` sgd_reg = SGDRegressor(penalty="l2", max_iter=1000, tol=1e-3, random_state=42) sgd_reg.fit(X, y.ravel()) sgd_reg.predict([[1.5]]) ``` **식 4-10: 라쏘 회귀의 비용 함수** $ J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + \alpha \sum\limits_{i=1}^{n}\left| \theta_i \right| $ ``` from sklearn.linear_model import Lasso plt.figure(figsize=(8,4)) plt.subplot(121) plot_model(Lasso, polynomial=False, alphas=(0, 0.1, 1), random_state=42) plt.ylabel("$y$", rotation=0, fontsize=18) plt.subplot(122) plot_model(Lasso, polynomial=True, alphas=(0, 10**-7, 1), random_state=42) save_fig("lasso_regression_plot") plt.show() from sklearn.linear_model import Lasso lasso_reg = Lasso(alpha=0.1) lasso_reg.fit(X, y) lasso_reg.predict([[1.5]]) ``` **식 4-12: 엘라스틱넷 비용 함수** $ J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + r \alpha \sum\limits_{i=1}^{n}\left| \theta_i \right| + \dfrac{1 - r}{2} \alpha \sum\limits_{i=1}^{n}{{\theta_i}^2} $ ``` from sklearn.linear_model import ElasticNet elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5, random_state=42) elastic_net.fit(X, y) elastic_net.predict([[1.5]]) np.random.seed(42) m = 100 X = 6 * np.random.rand(m, 1) - 3 y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1) X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10) ``` 조기 종료 예제: ``` from copy import deepcopy poly_scaler = Pipeline([ ("poly_features", PolynomialFeatures(degree=90, include_bias=False)), ("std_scaler", StandardScaler()) ]) X_train_poly_scaled = poly_scaler.fit_transform(X_train) X_val_poly_scaled = poly_scaler.transform(X_val) sgd_reg = SGDRegressor(max_iter=1, tol=-np.infty, warm_start=True, penalty=None, learning_rate="constant", eta0=0.0005, random_state=42) minimum_val_error = float("inf") best_epoch = None best_model = None for epoch in range(1000): 
sgd_reg.fit(X_train_poly_scaled, y_train) # 중지된 곳에서 다시 시작합니다 y_val_predict = sgd_reg.predict(X_val_poly_scaled) val_error = mean_squared_error(y_val, y_val_predict) if val_error < minimum_val_error: minimum_val_error = val_error best_epoch = epoch best_model = deepcopy(sgd_reg) ``` 그래프를 그립니다: ``` sgd_reg = SGDRegressor(max_iter=1, tol=-np.infty, warm_start=True, penalty=None, learning_rate="constant", eta0=0.0005, random_state=42) n_epochs = 500 train_errors, val_errors = [], [] for epoch in range(n_epochs): sgd_reg.fit(X_train_poly_scaled, y_train) y_train_predict = sgd_reg.predict(X_train_poly_scaled) y_val_predict = sgd_reg.predict(X_val_poly_scaled) train_errors.append(mean_squared_error(y_train, y_train_predict)) val_errors.append(mean_squared_error(y_val, y_val_predict)) best_epoch = np.argmin(val_errors) best_val_rmse = np.sqrt(val_errors[best_epoch]) plt.annotate('Best model', xy=(best_epoch, best_val_rmse), xytext=(best_epoch, best_val_rmse + 1), ha="center", arrowprops=dict(facecolor='black', shrink=0.05), fontsize=16, ) best_val_rmse -= 0.03 # just to make the graph look better plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2) plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="Validation set") plt.plot(np.sqrt(train_errors), "r--", linewidth=2, label="Training set") plt.legend(loc="upper right", fontsize=14) plt.xlabel("Epoch", fontsize=14) plt.ylabel("RMSE", fontsize=14) save_fig("early_stopping_plot") plt.show() best_epoch, best_model %matplotlib inline import matplotlib.pyplot as plt import numpy as np t1a, t1b, t2a, t2b = -1, 3, -1.5, 1.5 t1s = np.linspace(t1a, t1b, 500) t2s = np.linspace(t2a, t2b, 500) t1, t2 = np.meshgrid(t1s, t2s) T = np.c_[t1.ravel(), t2.ravel()] Xr = np.array([[1, 1], [1, -1], [1, 0.5]]) yr = 2 * Xr[:, :1] + 0.5 * Xr[:, 1:] J = (1/len(Xr) * np.sum((T.dot(Xr.T) - yr.T)**2, axis=1)).reshape(t1.shape) N1 = np.linalg.norm(T, ord=1, axis=1).reshape(t1.shape) N2 = np.linalg.norm(T, ord=2, axis=1).reshape(t1.shape) t_min_idx = np.unravel_index(np.argmin(J), J.shape) t1_min, t2_min = t1[t_min_idx], t2[t_min_idx] t_init = np.array([[0.25], [-1]]) def bgd_path(theta, X, y, l1, l2, core = 1, eta = 0.05, n_iterations = 200): path = [theta] for iteration in range(n_iterations): gradients = core * 2/len(X) * X.T.dot(X.dot(theta) - y) + l1 * np.sign(theta) + l2 * theta theta = theta - eta * gradients path.append(theta) return np.array(path) fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10.1, 8)) for i, N, l1, l2, title in ((0, N1, 2., 0, "Lasso"), (1, N2, 0, 2., "Ridge")): JR = J + l1 * N1 + l2 * 0.5 * N2**2 tr_min_idx = np.unravel_index(np.argmin(JR), JR.shape) t1r_min, t2r_min = t1[tr_min_idx], t2[tr_min_idx] levelsJ=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(J) - np.min(J)) + np.min(J) levelsJR=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(JR) - np.min(JR)) + np.min(JR) levelsN=np.linspace(0, np.max(N), 10) path_J = bgd_path(t_init, Xr, yr, l1=0, l2=0) path_JR = bgd_path(t_init, Xr, yr, l1, l2) path_N = bgd_path(np.array([[2.0], [0.5]]), Xr, yr, np.sign(l1)/3, np.sign(l2), core=0) ax = axes[i, 0] ax.grid(True) ax.axhline(y=0, color='k') ax.axvline(x=0, color='k') ax.contourf(t1, t2, N / 2., levels=levelsN) ax.plot(path_N[:, 0], path_N[:, 1], "y--") ax.plot(0, 0, "ys") ax.plot(t1_min, t2_min, "ys") ax.set_title(r"$\ell_{}$ penalty".format(i + 1), fontsize=16) ax.axis([t1a, t1b, t2a, t2b]) if i == 1: ax.set_xlabel(r"$\theta_1$", fontsize=16) ax.set_ylabel(r"$\theta_2$", fontsize=16, rotation=0) ax = axes[i, 1] 
ax.grid(True) ax.axhline(y=0, color='k') ax.axvline(x=0, color='k') ax.contourf(t1, t2, JR, levels=levelsJR, alpha=0.9) ax.plot(path_JR[:, 0], path_JR[:, 1], "w-o") ax.plot(path_N[:, 0], path_N[:, 1], "y--") ax.plot(0, 0, "ys") ax.plot(t1_min, t2_min, "ys") ax.plot(t1r_min, t2r_min, "rs") ax.set_title(title, fontsize=16) ax.axis([t1a, t1b, t2a, t2b]) if i == 1: ax.set_xlabel(r"$\theta_1$", fontsize=16) save_fig("lasso_vs_ridge_plot") plt.show() ``` # 로지스틱 회귀 ``` t = np.linspace(-10, 10, 100) sig = 1 / (1 + np.exp(-t)) plt.figure(figsize=(9, 3)) plt.plot([-10, 10], [0, 0], "k-") plt.plot([-10, 10], [0.5, 0.5], "k:") plt.plot([-10, 10], [1, 1], "k:") plt.plot([0, 0], [-1.1, 1.1], "k-") plt.plot(t, sig, "b-", linewidth=2, label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$") plt.xlabel("t") plt.legend(loc="upper left", fontsize=20) plt.axis([-10, 10, -0.1, 1.1]) save_fig("logistic_function_plot") plt.show() ``` **식 4-16: 하나의 훈련 샘플에 대한 비용 함수** $ c(\boldsymbol{\theta}) = \begin{cases} -\log(\hat{p}) & \text{if } y = 1, \\ -\log(1 - \hat{p}) & \text{if } y = 0. \end{cases} $ **식 4-17: 로지스틱 회귀 비용 함수(로그 손실)** $ J(\boldsymbol{\theta}) = -\dfrac{1}{m} \sum\limits_{i=1}^{m}{\left[ y^{(i)} log\left(\hat{p}^{(i)}\right) + (1 - y^{(i)}) log\left(1 - \hat{p}^{(i)}\right)\right]} $ **식 4-18: 로지스틱 비용 함수의 편도 함수** $ \dfrac{\partial}{\partial \theta_j} \text{J}(\boldsymbol{\theta}) = \dfrac{1}{m}\sum\limits_{i=1}^{m}\left(\mathbf{\sigma(\boldsymbol{\theta}}^T \mathbf{x}^{(i)}) - y^{(i)}\right)\, x_j^{(i)} $ ``` from sklearn import datasets iris = datasets.load_iris() list(iris.keys()) print(iris.DESCR) X = iris["data"][:, 3:] # 꽃잎 너비 y = (iris["target"] == 2).astype(np.int) # Iris virginica이면 1 아니면 0 ``` **노트**: 향후 버전이 바뀌더라도 동일한 결과를 만들기 위해 사이킷런 0.22 버전의 기본값인 `solver="lbfgs"`로 지정합니다. 
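To connect Equations 4-17 and 4-18 to code before handing things over to Scikit-Learn, here is a small NumPy sketch (not in the book or the original notebook) that evaluates the log loss and its gradient for the petal-width data defined above and runs plain batch gradient descent. The names `X_b_lr` and `theta_lr` are introduced only for this illustration; because Scikit-Learn's `LogisticRegression` applies $\ell_2$ regularization by default, its fitted coefficients will differ somewhat from this unregularized result.

```
# A sketch of Equation 4-17 (log loss) and Equation 4-18 (its gradient),
# assuming X (petal width) and y (Iris virginica indicator) from the cell above.
X_b_lr = np.c_[np.ones((len(X), 1)), X]   # add the bias term x0 = 1

def log_loss(theta):
    p_hat = 1 / (1 + np.exp(-X_b_lr.dot(theta)))               # sigma(theta^T x)
    return -np.mean(y * np.log(p_hat + 1e-7)
                    + (1 - y) * np.log(1 - p_hat + 1e-7))       # Equation 4-17

def log_loss_gradient(theta):
    p_hat = 1 / (1 + np.exp(-X_b_lr.dot(theta)))
    return X_b_lr.T.dot(p_hat - y) / len(y)                     # Equation 4-18

theta_lr = np.zeros(2)
for _ in range(5000):                        # plain batch gradient descent
    theta_lr = theta_lr - 0.1 * log_loss_gradient(theta_lr)

theta_lr, log_loss(theta_lr)
```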
``` from sklearn.linear_model import LogisticRegression log_reg = LogisticRegression(solver="lbfgs", random_state=42) log_reg.fit(X, y) X_new = np.linspace(0, 3, 1000).reshape(-1, 1) y_proba = log_reg.predict_proba(X_new) plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris virginica") plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris virginica") ``` 책에 실린 그림은 조금 더 예쁘게 꾸몄습니다: ``` X_new = np.linspace(0, 3, 1000).reshape(-1, 1) y_proba = log_reg.predict_proba(X_new) decision_boundary = X_new[y_proba[:, 1] >= 0.5][0] plt.figure(figsize=(8, 3)) plt.plot(X[y==0], y[y==0], "bs") plt.plot(X[y==1], y[y==1], "g^") plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2) plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris virginica") plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris virginica") plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center") plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b') plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g') plt.xlabel("Petal width (cm)", fontsize=14) plt.ylabel("Probability", fontsize=14) plt.legend(loc="center left", fontsize=14) plt.axis([0, 3, -0.02, 1.02]) save_fig("logistic_regression_plot") plt.show() decision_boundary log_reg.predict([[1.7], [1.5]]) from sklearn.linear_model import LogisticRegression X = iris["data"][:, (2, 3)] # petal length, petal width y = (iris["target"] == 2).astype(np.int) log_reg = LogisticRegression(solver="lbfgs", C=10**10, random_state=42) log_reg.fit(X, y) x0, x1 = np.meshgrid( np.linspace(2.9, 7, 500).reshape(-1, 1), np.linspace(0.8, 2.7, 200).reshape(-1, 1), ) X_new = np.c_[x0.ravel(), x1.ravel()] y_proba = log_reg.predict_proba(X_new) plt.figure(figsize=(10, 4)) plt.plot(X[y==0, 0], X[y==0, 1], "bs") plt.plot(X[y==1, 0], X[y==1, 1], "g^") zz = y_proba[:, 1].reshape(x0.shape) contour = plt.contour(x0, x1, zz, cmap=plt.cm.brg) left_right = np.array([2.9, 7]) boundary = -(log_reg.coef_[0][0] * left_right + log_reg.intercept_[0]) / log_reg.coef_[0][1] plt.clabel(contour, inline=1, fontsize=12) plt.plot(left_right, boundary, "k--", linewidth=3) plt.text(3.5, 1.5, "Not Iris virginica", fontsize=14, color="b", ha="center") plt.text(6.5, 2.3, "Iris virginica", fontsize=14, color="g", ha="center") plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.axis([2.9, 7, 0.8, 2.7]) save_fig("logistic_regression_contour_plot") plt.show() ``` **식 4-20: 소프트맥스 함수** $ \hat{p}_k = \sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}} $ **식 4-22: 크로스 엔트로피 비용 함수** $ J(\boldsymbol{\Theta}) = - \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)} $ **식 4-23: 클래스 k에 대한 크로스 엔트로피의 그레이디언트 벡터** $ \nabla_{\boldsymbol{\theta}^{(k)}} \, J(\boldsymbol{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}} $ ``` X = iris["data"][:, (2, 3)] # 꽃잎 길이, 꽃잎 너비 y = iris["target"] softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10, random_state=42) softmax_reg.fit(X, y) x0, x1 = np.meshgrid( np.linspace(0, 8, 500).reshape(-1, 1), np.linspace(0, 3.5, 200).reshape(-1, 1), ) X_new = np.c_[x0.ravel(), x1.ravel()] y_proba = softmax_reg.predict_proba(X_new) y_predict = softmax_reg.predict(X_new) zz1 = y_proba[:, 
1].reshape(x0.shape) zz = y_predict.reshape(x0.shape) plt.figure(figsize=(10, 4)) plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris virginica") plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris versicolor") plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris setosa") from matplotlib.colors import ListedColormap custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0']) plt.contourf(x0, x1, zz, cmap=custom_cmap) contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg) plt.clabel(contour, inline=1, fontsize=12) plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.legend(loc="center left", fontsize=14) plt.axis([0, 7, 0, 3.5]) save_fig("softmax_regression_contour_plot") plt.show() softmax_reg.predict([[5, 2]]) softmax_reg.predict_proba([[5, 2]]) ``` # 연습문제 해답 ## 1. to 11. 부록 A를 참고하세요. ## 12. 조기 종료를 사용한 배치 경사 하강법으로 소프트맥스 회귀 구현하기 (사이킷런을 사용하지 않고) 먼저 데이터를 로드합니다. 앞서 사용했던 Iris 데이터셋을 재사용하겠습니다. ``` X = iris["data"][:, (2, 3)] # 꽃잎 길이, 꽃잎 넓이 y = iris["target"] ``` 모든 샘플에 편향을 추가합니다 ($x_0 = 1$): ``` X_with_bias = np.c_[np.ones([len(X), 1]), X] ``` 결과를 일정하게 유지하기 위해 랜덤 시드를 지정합니다: ``` np.random.seed(2042) ``` 데이터셋을 훈련 세트, 검증 세트, 테스트 세트로 나누는 가장 쉬운 방법은 사이킷런의 `train_test_split()` 함수를 사용하는 것입니다. 하지만 이 연습문제의 목적은 직접 만들어 보면서 알고리즘을 이해하는 것이므로 다음과 같이 수동으로 나누어 보겠습니다: ``` test_ratio = 0.2 validation_ratio = 0.2 total_size = len(X_with_bias) test_size = int(total_size * test_ratio) validation_size = int(total_size * validation_ratio) train_size = total_size - test_size - validation_size rnd_indices = np.random.permutation(total_size) X_train = X_with_bias[rnd_indices[:train_size]] y_train = y[rnd_indices[:train_size]] X_valid = X_with_bias[rnd_indices[train_size:-test_size]] y_valid = y[rnd_indices[train_size:-test_size]] X_test = X_with_bias[rnd_indices[-test_size:]] y_test = y[rnd_indices[-test_size:]] ``` 타깃은 클래스 인덱스(0, 1 그리고 2)이지만 소프트맥스 회귀 모델을 훈련시키기 위해 필요한 것은 타깃 클래스의 확률입니다. 각 샘플에서 확률이 1인 타깃 클래스를 제외한 다른 클래스의 확률은 0입니다(다른 말로하면 주어진 샘플에 대한 클래스 확률이 원-핫 벡터입니다). 클래스 인덱스를 원-핫 벡터로 바꾸는 간단한 함수를 작성하겠습니다: ``` def to_one_hot(y): n_classes = y.max() + 1 m = len(y) Y_one_hot = np.zeros((m, n_classes)) Y_one_hot[np.arange(m), y] = 1 return Y_one_hot ``` 10개 샘플만 넣어 이 함수를 테스트해 보죠: ``` y_train[:10] to_one_hot(y_train[:10]) ``` 잘 되네요, 이제 훈련 세트와 테스트 세트의 타깃 클래스 확률을 담은 행렬을 만들겠습니다: ``` Y_train_one_hot = to_one_hot(y_train) Y_valid_one_hot = to_one_hot(y_valid) Y_test_one_hot = to_one_hot(y_test) ``` 이제 소프트맥스 함수를 만듭니다. 다음 공식을 참고하세요: $\sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}}$ ``` def softmax(logits): exps = np.exp(logits) exp_sums = np.sum(exps, axis=1, keepdims=True) return exps / exp_sums ``` 훈련을 위한 준비를 거의 마쳤습니다. 입력과 출력의 개수를 정의합니다: ``` n_inputs = X_train.shape[1] # == 3 (특성 2개와 편향) n_outputs = len(np.unique(y_train)) # == 3 (3개의 붓꽃 클래스) ``` 이제 좀 복잡한 훈련 파트입니다! 이론적으로는 간단합니다. 그냥 수학 공식을 파이썬 코드로 바꾸기만 하면 됩니다. 하지만 실제로는 꽤 까다로운 면이 있습니다. 특히, 항이나 인덱스의 순서가 뒤섞이기 쉽습니다. 제대로 작동할 것처럼 코드를 작성했더라도 실제 제대로 계산하지 못합니다. 확실하지 않을 때는 각 항의 크기를 기록하고 이에 상응하는 코드가 같은 크기를 만드는지 확인합니다. 각 항을 독립적으로 평가해서 출력해 보는 것도 좋습니다. 사실 사이킷런에 이미 잘 구현되어 있기 때문에 이렇게 할 필요는 없습니다. 하지만 직접 만들어 보면 어떻게 작동하는지 이해하는데 도움이 됩니다. 
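Following that advice, here is a quick shape check (not in the original notebook) that evaluates each term once and prints its dimensions before committing to the full training loop; the `*_check` names are used only for this illustration.

```
# Sanity-check the shape of every term in the gradient computation,
# using the split data and helper functions defined above.
Theta_check = np.random.randn(n_inputs, n_outputs)        # (3, 3)
logits_check = X_train.dot(Theta_check)                   # (m, 3)
Y_proba_check = softmax(logits_check)                     # (m, 3)
error_check = Y_proba_check - Y_train_one_hot             # (m, 3)
grad_check = X_train.T.dot(error_check) / len(X_train)    # (3, 3), same shape as Theta

for name, arr in [("X_train", X_train), ("Theta", Theta_check),
                  ("logits", logits_check), ("Y_proba", Y_proba_check),
                  ("error", error_check), ("gradients", grad_check)]:
    print(name, arr.shape)
```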
구현할 공식은 비용함수입니다: $J(\mathbf{\Theta}) = - \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}$ 그리고 그레이디언트 공식입니다: $\nabla_{\mathbf{\theta}^{(k)}} \, J(\mathbf{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}$ $\hat{p}_k^{(i)} = 0$이면 $\log\left(\hat{p}_k^{(i)}\right)$를 계산할 수 없습니다. `nan` 값을 피하기 위해 $\log\left(\hat{p}_k^{(i)}\right)$에 아주 작은 값 $\epsilon$을 추가하겠습니다. ``` eta = 0.01 n_iterations = 5001 m = len(X_train) epsilon = 1e-7 Theta = np.random.randn(n_inputs, n_outputs) for iteration in range(n_iterations): logits = X_train.dot(Theta) Y_proba = softmax(logits) loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1)) error = Y_proba - Y_train_one_hot if iteration % 500 == 0: print(iteration, loss) gradients = 1/m * X_train.T.dot(error) Theta = Theta - eta * gradients ``` 바로 이겁니다! 소프트맥스 모델을 훈련시켰습니다. 모델 파라미터를 확인해 보겠습니다: ``` Theta ``` 검증 세트에 대한 예측과 정확도를 확인해 보겠습니다: ``` logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_valid) accuracy_score ``` 와우, 이 모델이 매우 잘 작동하는 것 같습니다. 연습을 위해서 $\ell_2$ 규제를 조금 추가해 보겠습니다. 다음 코드는 위와 거의 동일하지만 손실에 $\ell_2$ 페널티가 추가되었고 그래디언트에도 항이 추가되었습니다(`Theta`의 첫 번째 원소는 편향이므로 규제하지 않습니다). 학습률 `eta`도 증가시켜 보겠습니다. ``` eta = 0.1 n_iterations = 5001 m = len(X_train) epsilon = 1e-7 alpha = 0.1 # 규제 하이퍼파라미터 Theta = np.random.randn(n_inputs, n_outputs) for iteration in range(n_iterations): logits = X_train.dot(Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(Theta[1:])) loss = xentropy_loss + alpha * l2_loss error = Y_proba - Y_train_one_hot if iteration % 500 == 0: print(iteration, loss) gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_outputs]), alpha * Theta[1:]] Theta = Theta - eta * gradients ``` 추가된 $\ell_2$ 페널티 때문에 이전보다 손실이 조금 커보이지만 더 잘 작동하는 모델이 되었을까요? 확인해 보죠: ``` logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_valid) accuracy_score ``` 와우, 완벽한 정확도네요! 운이 좋은 검증 세트일지 모르지만 잘 된 것은 맞습니다. 이제 조기 종료를 추가해 보죠. 이렇게 하려면 매 반복에서 검증 세트에 대한 손실을 계산해서 오차가 증가하기 시작할 때 멈춰야 합니다. ``` eta = 0.1 n_iterations = 5001 m = len(X_train) epsilon = 1e-7 alpha = 0.1 # 규제 하이퍼파라미터 best_loss = np.infty Theta = np.random.randn(n_inputs, n_outputs) for iteration in range(n_iterations): logits = X_train.dot(Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(Theta[1:])) loss = xentropy_loss + alpha * l2_loss error = Y_proba - Y_train_one_hot gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_outputs]), alpha * Theta[1:]] Theta = Theta - eta * gradients logits = X_valid.dot(Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(Y_valid_one_hot * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(Theta[1:])) loss = xentropy_loss + alpha * l2_loss if iteration % 500 == 0: print(iteration, loss) if loss < best_loss: best_loss = loss else: print(iteration - 1, best_loss) print(iteration, loss, "조기 종료!") break logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_valid) accuracy_score ``` 여전히 완벽하지만 더 빠릅니다. 
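One small caveat (not raised in the original notebook): the loop above stops as soon as the validation loss rises, but it never keeps a copy of the best parameters, so `Theta` ends up one step past the optimum. Below is a minimal sketch that mirrors the `deepcopy(sgd_reg)` pattern used earlier in the chapter; the names `Theta_es`, `best_Theta_es`, and `best_loss_es` are introduced here so the original variables are left untouched.

```
eta = 0.1
n_iterations = 5001
alpha = 0.1            # regularization hyperparameter
epsilon = 1e-7
best_loss_es = np.infty
best_Theta_es = None
Theta_es = np.random.randn(n_inputs, n_outputs)

for iteration in range(n_iterations):
    # training step (identical to the loop above)
    error = softmax(X_train.dot(Theta_es)) - Y_train_one_hot
    gradients = 1/len(X_train) * X_train.T.dot(error) + np.r_[np.zeros([1, n_outputs]), alpha * Theta_es[1:]]
    Theta_es = Theta_es - eta * gradients

    # validation loss (identical to the loop above)
    Y_proba_valid = softmax(X_valid.dot(Theta_es))
    loss = (-np.mean(np.sum(Y_valid_one_hot * np.log(Y_proba_valid + epsilon), axis=1))
            + alpha * 1/2 * np.sum(np.square(Theta_es[1:])))

    if loss < best_loss_es:
        best_loss_es = loss
        best_Theta_es = Theta_es.copy()   # snapshot of the best model so far
    else:
        print(iteration, loss, "early stopping!")
        break

# evaluate the snapshot instead of the slightly worse final parameters
y_predict = np.argmax(softmax(X_valid.dot(best_Theta_es)), axis=1)
np.mean(y_predict == y_valid)
```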
이제 전체 데이터셋에 대한 모델의 예측을 그래프로 나타내 보겠습니다: ``` x0, x1 = np.meshgrid( np.linspace(0, 8, 500).reshape(-1, 1), np.linspace(0, 3.5, 200).reshape(-1, 1), ) X_new = np.c_[x0.ravel(), x1.ravel()] X_new_with_bias = np.c_[np.ones([len(X_new), 1]), X_new] logits = X_new_with_bias.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) zz1 = Y_proba[:, 1].reshape(x0.shape) zz = y_predict.reshape(x0.shape) plt.figure(figsize=(10, 4)) plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris virginica") plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris versicolor") plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris setosa") from matplotlib.colors import ListedColormap custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0']) plt.contourf(x0, x1, zz, cmap=custom_cmap) contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg) plt.clabel(contour, inline=1, fontsize=12) plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.legend(loc="upper left", fontsize=14) plt.axis([0, 7, 0, 3.5]) plt.show() ``` 이제 테스트 세트에 대한 모델의 최종 정확도를 측정해 보겠습니다: ``` logits = X_test.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_test) accuracy_score ``` 완벽했던 최종 모델의 성능이 조금 떨어졌습니다. 이런 차이는 데이터셋이 작기 때문일 것입니다. 훈련 세트와 검증 세트, 테스트 세트를 어떻게 샘플링했는지에 따라 매우 다른 결과를 얻을 수 있습니다. 몇 번 랜덤 시드를 바꾸고 이 코드를 다시 실행해 보면 결과가 달라지는 것을 확인할 수 있습니다.
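To back up that last point empirically, here is a short sketch (not in the original notebook) that repeats the manual split and a regularized softmax training run for a few random seeds and prints the resulting test accuracies. It reuses `X_with_bias`, `y`, `to_one_hot()`, `softmax()`, and the size variables defined above; `train_and_score` is a name introduced just for this illustration.

```
def train_and_score(seed, n_iterations=3001, eta=0.1, alpha=0.1):
    """Split with the given seed, train a regularized softmax model, return test accuracy."""
    rng = np.random.RandomState(seed)
    idx = rng.permutation(total_size)
    X_tr, y_tr = X_with_bias[idx[:train_size]], y[idx[:train_size]]
    X_te, y_te = X_with_bias[idx[-test_size:]], y[idx[-test_size:]]
    Y_tr_one_hot = to_one_hot(y_tr)
    Theta_s = rng.randn(n_inputs, n_outputs)
    for _ in range(n_iterations):
        error = softmax(X_tr.dot(Theta_s)) - Y_tr_one_hot
        gradients = X_tr.T.dot(error) / len(X_tr) + np.r_[np.zeros([1, n_outputs]), alpha * Theta_s[1:]]
        Theta_s = Theta_s - eta * gradients
    y_pred = np.argmax(softmax(X_te.dot(Theta_s)), axis=1)
    return np.mean(y_pred == y_te)

for seed in (0, 1, 2, 3, 4):
    print(seed, train_and_score(seed))
```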
## import libraries
```
import pandas
import configparser
import psycopg2

# read the database credentials from config.ini and open a connection
config = configparser.ConfigParser()
config.read('config.ini')

host = config['myaws']['host']
db = config['myaws']['db']
user = config['myaws']['user']
pwd = config['myaws']['pwd']

conn = psycopg2.connect(host=host,
                        user=user,
                        password=pwd,
                        dbname=db)
cur = conn.cursor()
```
## create the house table
```
# replace the schema and table name with your own schema and table name
table_sql = """
            CREATE TABLE IF NOT EXISTS gp29.house
            (
                price integer,
                bed integer,
                bath integer,
                area integer,
                address VARCHAR(200),
                PRIMARY KEY(address)
            );
            """

#conn.rollback()
#table_sql = "drop table if exists demo.house"
cur.execute(table_sql)
conn.commit()
```
## define the search region
```
url = 'https://www.trulia.com/VA/McLean/22101/'

import urllib.request

response = urllib.request.urlopen(url)
html_data = response.read()
#print(html_data.decode('utf-8'))

from bs4 import BeautifulSoup
soup = BeautifulSoup(html_data, 'html.parser')
#print(soup)
```
## insert the records into database
```
for li_class in soup.find_all('li', class_='Grid__CellBox-sc-144isrp-0 SearchResultsList__WideCell-b7y9ki-2 jiZmPM'):
    try:
        # pull the listing fields out of each result card
        for price_div in li_class.find_all('div', {'data-testid': 'property-price'}):
            price = int(price_div.text.replace('$', '').replace(",", ""))
        for bed_div in li_class.find_all('div', {'data-testid': 'property-beds'}):
            bed = int(bed_div.text.replace('bd', '').replace(",", ""))
        for bath_div in li_class.find_all('div', {'data-testid': 'property-baths'}):
            bath = int(bath_div.text.replace('ba', '').replace(",", ""))
        for area_div in li_class.find_all('div', {'data-testid': 'property-floorSpace'}):
            area = int(area_div.text.split('sqft')[0].replace(",", ""))
        for address_div in li_class.find_all('div', {'data-testid': 'property-address'}):
            address = address_div.text
        try:
            # parameterized insert so quotes in the address can't break the statement
            sql_insert = """
                         insert into gp29.house(price, bed, bath, area, address)
                         values(%s, %s, %s, %s, %s)
                         """
            cur.execute(sql_insert, (price, bed, bath, area, address))
            conn.commit()
        except Exception:
            # duplicate address or malformed record: undo and move on
            conn.rollback()
    except Exception:
        pass
```
## Query the Table
```
df = pandas.read_sql_query('select * from gp29.house', conn)
df[:10]
```
## basic stats
```
df.describe()
```
## price distribution
```
df['price'].hist()
```
## bed vs bath
```
df.plot.scatter(x='bed', y='bath')
```
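The same connection can also run aggregate queries instead of pulling the whole table into pandas. A minimal sketch, assuming the `gp29.house` table populated above (columns as defined in the `CREATE TABLE`; the `nullif` guard only avoids dividing by a zero floor area):

```
# average price and price per square foot, grouped by number of bedrooms
agg_sql = """
          select bed,
                 count(*)                             as n_listings,
                 avg(price)                           as avg_price,
                 avg(price::float / nullif(area, 0))  as avg_price_per_sqft
          from gp29.house
          group by bed
          order by bed;
          """
agg_df = pandas.read_sql_query(agg_sql, conn)
agg_df
```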