Dataset columns: markdown (string, 0–1.02M chars), code (string, 0–832k chars), output (string, 0–1.02M chars), license (string, 3–36 chars), path (string, 6–265 chars), repo_name (string, 6–127 chars).
Node features seem very sensitive; now perturb the topology.
# keep backup
backup = data.edge_index.clone()
backup

perturb_data_list = []
for i in range(1000):
    # clone original data
    pData = data.clone()
    # noise parameters
    noEdgeSwap = 3
    # create edges
    edges = pData.edge_index.T.tolist()
    edges = np.array(edges)
    edges = [(x[0][0], x[0][1], {"feat": str(x[1])})
             for x in list(zip(edges.tolist(), pData.edge_attr.tolist()))]
    nodes = [(x[0], {"feat": str(x[1])}) for x in enumerate(pData.x.tolist())]
    G = nx.Graph()
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)
    # swap edges
    G = nx.double_edge_swap(G, noEdgeSwap)
    # both directions
    newEdges = list(G.edges()) + [(x[1], x[0]) for x in G.edges()]
    newEdges = torch.tensor(newEdges).T
    # set value
    pData.edge_index = newEdges
    perturb_data_list.append(pData)
    # visualise some graphs
    if i % 50 == 0:
        plt.figure(figsize=(2, 2))
        nx.draw(G)
        plt.show()

len(perturb_data_list)

valid_loader = DataLoader(perturb_data_list, batch_size=args.batch_size,
                          shuffle=False, num_workers=args.num_workers)

# get data
batch = list(valid_loader)[0]
batch = batch.to(device)
with torch.no_grad():
    pred = model(batch)  # .view(-1,)
pred.shape

plt.title("Perturb topology. Label: {:.2f}".format(y_true))
plt.hist(pred.view(-1).tolist())
plt.axvline(y_pred, c="r")
plt.show()
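Since the note above says the node features seem very sensitive, a natural companion experiment is to perturb the features rather than the topology. This is only a sketch under the same setup (`data`, `model`, `DataLoader`, `args`, `y_true`, `y_pred` as in the cell above); the number of swapped nodes `noNodeSwap` is an arbitrary illustrative choice, and swapping whole feature rows keeps the categorical atom features valid:

# hypothetical companion experiment: perturb node features by swapping feature rows
feat_perturb_list = []
noNodeSwap = 3
for i in range(1000):
    pData = data.clone()
    num_nodes = pData.x.shape[0]
    for _ in range(noNodeSwap):
        # pick two distinct nodes and exchange their feature vectors
        a, b = np.random.choice(num_nodes, 2, replace=False)
        tmp = pData.x[a].clone()
        pData.x[a] = pData.x[b]
        pData.x[b] = tmp
    feat_perturb_list.append(pData)

feat_loader = DataLoader(feat_perturb_list, batch_size=args.batch_size,
                         shuffle=False, num_workers=args.num_workers)
batch = list(feat_loader)[0].to(device)
with torch.no_grad():
    pred = model(batch)

plt.title("Perturb node features. Label: {:.2f}".format(y_true))
plt.hist(pred.view(-1).tolist())
plt.axvline(y_pred, c="r")
plt.show()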
_____no_output_____
MIT
examples/lsc/pcqm4m/.ipynb_checkpoints/triplet-loss-checkpoint.ipynb
edwardelson/ogb
Copyright Netherlands eScience Center and Centrum Wiskunde & Informatica. **Function**: Emotion recognition and forecast with BBConvLSTM. **Author**: Yang Liu. **Contributor**: Tianyi Zhang (Centrum Wiskunde & Informatica). **Last Update**: 2021.02.12. **Libraries**: PyTorch, NumPy, os, DLACs, matplotlib. **Description**: This notebook tests the prediction skill of deep neural networks for emotion recognition and forecasting. A Bayesian convolutional Long Short-Term Memory network with Bernoulli approximate variational inference is used for this spatio-temporal sequence problem, with PyTorch as the deep learning framework. **Many-to-one prediction.** **Return values**: Time series and figures. **This project is a joint venture between NLeSC and CWI.** The method comes from Shi et al. (2015), "Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting".
%matplotlib inline

import sys
import numbers
import pickle
# for data loading
import os
# for pre-processing and machine learning
import numpy as np
import csv
#import sklearn
from scipy.signal import resample
# for visualization
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
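The cell above only loads libraries; the Bayesian ConvLSTM itself comes from the DLACs package. For orientation, a minimal, non-Bayesian ConvLSTM cell in the spirit of Shi et al. (2015) could look like the sketch below. The class name, channel arguments, and kernel size are illustrative and this is not the DLACs implementation:

import torch
import torch.nn as nn

class ConvLSTMCell(nn.Module):
    """Minimal ConvLSTM cell: the gates are computed with convolutions instead
    of matrix products, so hidden states keep their spatial layout."""
    def __init__(self, in_channels, hidden_channels, kernel_size=3):
        super().__init__()
        padding = kernel_size // 2
        # one convolution produces all four gates at once
        self.gates = nn.Conv2d(in_channels + hidden_channels,
                               4 * hidden_channels, kernel_size, padding=padding)
        self.hidden_channels = hidden_channels

    def forward(self, x, state):
        h, c = state
        i, f, o, g = torch.chunk(self.gates(torch.cat([x, h], dim=1)), 4, dim=1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
        g = torch.tanh(g)
        c = f * c + i * g          # update the cell state
        h = o * torch.tanh(c)      # emit the new hidden state
        return h, c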
_____no_output_____
Apache-2.0
tests/data_preview.ipynb
geek-yang/NEmo
The testing device is a Dell Inspiron 5680 with an Intel Core i7-8700 x64 CPU and an Nvidia GTX 1060 6GB GPU. Here is a benchmark comparing the CPU with the GTX 1060: https://www.analyticsindiamag.com/deep-learning-tensorflow-benchmark-intel-i5-4210u-vs-geforce-nvidia-1060-6gb/
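Since the benchmark above compares CPU and GPU, a common pattern is to pick the device once at the top of the notebook. This is only an illustrative sketch; the variable name `device` is an assumption and is not defined elsewhere in this notebook:

import torch
# run on the GTX 1060 when CUDA is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)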
#################################################################################
#########                        datapath                               #########
#################################################################################
# please specify data path
datapath = 'H:\\Creator_Zone\\Script_craft\\NEmo\\Data_CASE'
output_path = 'H:\\Creator_Zone\\Script_craft\\NEmo\\results'
model_path = 'H:\\Creator_Zone\\Script_craft\\NEmo\\models'
# please specify the constants for input data
window_size = 2000  # down-sampling constant
seq = 20
v_a = 0  # valence = 0, arousal = 1
# leave-one-out training and testing
num_s = 2

f = open(os.path.join(datapath, 'data_{}s'.format(int(window_size/100))), 'rb')
data = pickle.load(f)
f.close()

samples = data["Samples"]
labels = data["label_s"]
subject_id = data["Subject_id"]
print(subject_id)

x_train = samples[np.where(subject_id != num_s)[0], :, 0:4]
x_test = samples[np.where(subject_id == num_s)[0], :, 0:4]
y_train = np.zeros([0, int(window_size/seq), 1])
y_test = np.zeros([0, int(window_size/seq), 1])
for i in range(len(labels)):
    sig = resample(labels[i][:, v_a], int(window_size/seq)).reshape([1, -1, 1]) / 9
    if subject_id[i] == num_s:
        y_test = np.concatenate([y_test, sig], axis=0)
    else:
        y_train = np.concatenate([y_train, sig], axis=0)
_____no_output_____
Apache-2.0
tests/data_preview.ipynb
geek-yang/NEmo
Recurrent PPO landing using raw altimeter readings
import numpy as np
import os, sys
sys.path.append('../../../RL_lib/Agents/PPO')
sys.path.append('../../../RL_lib/Utils')
sys.path.append('../../../Mars3dof_env')
sys.path.append('../../../Mars_DTM')
%load_ext autoreload
%autoreload 2
%matplotlib nbagg
import os
print(os.getcwd())

%%html
<style>
.output_wrapper, .output {
    height: auto !important;
    max-height: 1000px;  /* your desired max-height here */
}
.output_scroll {
    box-shadow: none !important;
    -webkit-box-shadow: none !important;
}
</style>
_____no_output_____
MIT
Experiments/Mars3DOF/Mars_landing_DTM/altimeter_v_mm3-120step.ipynb
CHEN-yongquan/RL-Meta-Learning-ACTA
Optimize Policy
from env import Env import env_utils as envu from dynamics_model import Dynamics_model from lander_model import Lander_model from ic_gen2 import Landing_icgen import rl_utils from arch_policy_vf import Arch from model import Model from policy import Policy from value_function import Value_function import pcm_model_nets as model_nets import policy_nets as policy_nets import valfunc_nets as valfunc_nets from agent import Agent import torch.nn as nn from flat_constraint import Flat_constraint from glideslope_constraint import Glideslope_constraint from reward_terminal_mdr import Reward from dtm_measurement_model3 import DTM_measurement_model from altimeter_v import Altimeter dtm = np.load('../../../Mars_DTM/synth_elevations.npy') print(dtm.shape, np.min(dtm), np.max(dtm)) target_position = np.asarray([4000,4000,400]) mm = DTM_measurement_model(dtm,check_vertical_errors=False) altimeter = Altimeter(mm,target_position,theta=np.pi/8) arch = Arch() logger = rl_utils.Logger() dynamics_model = Dynamics_model() lander_model = Lander_model(altimeter=altimeter, apf_tau1=20,apf_tau2=100,apf_vf1=-2,apf_vf2=-1,apf_v0=70,apf_atarg=15.) lander_model.get_state_agent = lander_model.get_state_agent_dtm obs_dim = 8 act_dim = 3 recurrent_steps = 120 reward_object = Reward() glideslope_constraint = Glideslope_constraint(gs_limit=0.5) shape_constraint = Flat_constraint() env = Env(lander_model,dynamics_model,logger, reward_object=reward_object, glideslope_constraint=glideslope_constraint, shape_constraint=shape_constraint, tf_limit=100.0,print_every=10,scale_agent_action=True) env.ic_gen = Landing_icgen(mass_uncertainty=0.10, g_uncertainty=(0.05,0.05), adjust_apf_v0=True, downrange = (0,2000 , -70, -30), crossrange = (-1000,1000 , -30,30), altitude = (2300,2400,-90,-70)) env.ic_gen.show() arch = Arch() policy = Policy(policy_nets.GRU(obs_dim, act_dim, recurrent_steps=recurrent_steps), shuffle=False, kl_targ=0.001,epochs=20, beta=0.1, servo_kl=True, max_grad_norm=30, init_func=rl_utils.xn_init) value_function = Value_function(valfunc_nets.GRU(obs_dim, recurrent_steps=recurrent_steps), shuffle=False, batch_size=9999999, max_grad_norm=30) agent = Agent(arch, policy, value_function, None, env, logger, policy_episodes=30, policy_steps=3000, gamma1=0.95, gamma2=0.995, lam=0.98, recurrent_steps=recurrent_steps, monitor=env.rl_stats) load_params=True fname = "altimeter_v_mm3-120step" if load_params: policy.load_params(fname) value_function.load_params(fname) else: agent.train(30000) fname = "altimeter_v_mm3-120step" policy.save_params(fname) value_function.save_params(fname) np.save(fname + "_history",env.rl_stats.history)
_____no_output_____
MIT
Experiments/Mars3DOF/Mars_landing_DTM/altimeter_v_mm3-120step.ipynb
CHEN-yongquan/RL-Meta-Learning-ACTA
Test Policy with Realistic Noise
policy.test_mode = True
env.test_policy_batch(agent, 1000, print_every=100)

len(lander_model.trajectory_list)
traj_list = lander_model.trajectory_list[0:100]
len(traj_list)

np.save(fname + '_100traj', traj_list)
envu.plot_rf_vf(env.rl_stats.history)
_____no_output_____
MIT
Experiments/Mars3DOF/Mars_landing_DTM/altimeter_v_mm3-120step.ipynb
CHEN-yongquan/RL-Meta-Learning-ACTA
Flowers classifier using Transfer Learning and tf.data

Accuracy: 0.9090909090909091

Classification Report

              precision    recall  f1-score   support
    0         0.96429      0.90000  0.93103        60
    1         0.88750      0.98611  0.93421        72
    2         0.81538      0.89831  0.85484        59
    3         0.98462      0.86486  0.92086        74
    4         0.90698      0.89655  0.90173        87
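The accuracy and per-class table above follow the format produced by scikit-learn. As a hedged sketch of how such numbers are typically computed, where `y_true_labels` and `y_pred_labels` are placeholders for the integer validation labels and model predictions generated later in this notebook:

from sklearn.metrics import accuracy_score, classification_report

# y_true_labels / y_pred_labels are placeholder names, not variables from this notebook
print("Accuracy:", accuracy_score(y_true_labels, y_pred_labels))
print(classification_report(y_true_labels, y_pred_labels, digits=5))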
#""" # Google Collab specific stuff.... from google.colab import drive drive.mount('/content/drive') import os !ls "/content/drive/My Drive" USING_COLLAB = True %tensorflow_version 2.x #""" # Setup sys.path to find MachineLearning lib directory try: USING_COLLAB except NameError: USING_COLLAB = False %load_ext autoreload %autoreload 2 import sys if "MachineLearning" in sys.path[0]: pass else: print(sys.path) if USING_COLLAB: sys.path.insert(0, '/content/drive/My Drive/GitHub/MachineLearning/lib') ###### CHANGE FOR SPECIFIC ENVIRONMENT else: sys.path.insert(0, '/Users/john/Documents/GitHub/MachineLearning/lib') ###### CHANGE FOR SPECIFIC ENVIRONMENT print(sys.path) from __future__ import absolute_import, division, print_function, unicode_literals import os, sys, random, warnings, time, copy, csv, gc import numpy as np import IPython.display as display from PIL import Image import matplotlib.pyplot as plt %matplotlib inline import cv2 from tqdm import tqdm_notebook, tnrange, tqdm import pandas as pd import tensorflow as tf print(tf.__version__) AUTOTUNE = tf.data.experimental.AUTOTUNE print("AUTOTUNE: ", AUTOTUNE) from TrainingUtils import * #warnings.filterwarnings("ignore", category=DeprecationWarning) #warnings.filterwarnings("ignore", category=UserWarning) warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Examine and understand data
# GLOBALS/CONFIG ITEMS # Set root directory path to data if USING_COLLAB: ROOT_PATH = "/content/drive/My Drive/ImageData/Flowers" ###### CHANGE FOR SPECIFIC ENVIRONMENT else: ROOT_PATH = "/Users/john/Documents/ImageData/Flowers" ###### CHANGE FOR SPECIFIC ENVIRONMENT # Establish global dictionary parms = GlobalParms(ROOT_PATH=ROOT_PATH, TRAIN_DIR="train", SMALL_RUN=False, NUM_CLASSES=5, IMAGE_ROWS=224, IMAGE_COLS=224, IMAGE_CHANNELS=3, BATCH_SIZE=32, EPOCS=10, IMAGE_EXT=".jpg", FINAL_ACTIVATION='sigmoid', LOSS='binary_crossentropy', METRICS=['accuracy']) parms.print_contents() #""" # If not loaded, uncomment one of these to load the database as needed # This loads the files into a temporary directory load_dir = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', fname='flower_photos', untar=True) # This loads the files into a actual directory, WILL TAKE LONGER TO UNZIP AND TRAIN. But stored on Drive #load_dir = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', # fname='flower_photos', untar=True, cache_subdir=parms.TRAIN_PATH) # set new value for TRAIN_PATH parms.set_train_path(load_dir) # If we downloaded the images, then overide TRAIN_PATH print(load_dir, parms.TRAIN_PATH) #""" if parms.SMALL_RUN: max_subdir_files = 10 else: max_subdir_files = 1000000 images_list, sub_directories = load_file_names_labeled_subdir_Util(parms.TRAIN_PATH, parms.IMAGE_EXT, max_dir_files=max_subdir_files) images_list_len = len(images_list) print("Number of images: ", images_list_len) random.shuffle(images_list) # randomize the list # Set the class names. parms.set_class_names(sub_directories) print("Classes: ", parms.NUM_CLASSES, " Labels: ", len(parms.CLASS_NAMES), " ", parms.CLASS_NAMES) # Show a few images for image_path in images_list[:3]: print(image_path) display.display(Image.open(str(image_path))) # Create Dataset from list of images full_dataset = tf.data.Dataset.from_tensor_slices(np.array(images_list)) full_dataset = full_dataset.shuffle(images_list_len) # Verify image paths were loaded and save one path for later in "some_image" for f in full_dataset.take(5): some_image = f.numpy().decode("utf-8") print(f.numpy()) print("Some Image: ", some_image)
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Build an input pipeline
def get_label(file_path): # convert the path to a list of path components parts = tf.strings.split(file_path, os.path.sep) # The second to last is the class-directory return parts[-2] == parms.CLASS_NAMES def decode_image(image): # convert the compressed string to a 3D uint8 tensor image = tf.image.decode_jpeg(image, channels=parms.IMAGE_CHANNELS) # Use `convert_image_dtype` to convert to floats in the [0,1] range. image = tf.image.convert_image_dtype(image, parms.IMAGE_DTYPE) # resize the image to the desired size. return tf.image.resize(image, [parms.IMAGE_ROWS, parms.IMAGE_COLS]) def image_aug(image): # do any augmentations if tf.random.uniform(()) > 0.25: k = tf.random.uniform(shape=[], minval=1, maxval=4, dtype=tf.int32) image = tf.image.rot90(image, k) #0-4, 0/270, 90/180/270 image = tf.clip_by_value(image, 0, 1) # always clip back to 0, 1 before returning return image def process_path_train(file_path): label = get_label(file_path) # load the raw data from the file as a string image = tf.io.read_file(file_path) image = decode_image(image) # add any augmentations image = image_aug(image) return image, label def process_path_val(file_path): label = get_label(file_path) # load the raw data from the file as a string image = tf.io.read_file(file_path) image = decode_image(image) return image, label def prepare_for_training(ds, cache=True, shuffle_buffer_size=1000): # This is a small dataset, only load it once, and keep it in memory. # use `.cache(filename)` to cache preprocessing work for datasets that don't # fit in memory. if cache: if isinstance(cache, str): ds = ds.cache(cache) else: ds = ds.cache() ds = ds.shuffle(buffer_size=shuffle_buffer_size) # Repeat forever ds = ds.repeat() ds = ds.batch(parms.BATCH_SIZE) # `prefetch` lets the dataset fetch batches in the background while the model # is training. ds = ds.prefetch(buffer_size=AUTOTUNE) return ds # display images.... 
def show_batch(image_batch, label_batch, number_to_show=25): plt.figure(figsize=(10,10)) show_number = number_to_show if parms.BATCH_SIZE < number_to_show: show_number = parms.BATCH_SIZE for n in range(show_number): ax = plt.subplot(5,5,n+1) plt.imshow(tf.keras.preprocessing.image.array_to_img(image_batch[n])) plt.title(parms.CLASS_NAMES[np.argmax(label_batch[n])].title()) plt.axis('off') # split into training and validation sets of images train_len = int(0.9 * images_list_len) val_len = images_list_len - train_len # Create datasets with new sizes train_dataset = full_dataset.take(train_len) # Creates dataset with new size val_dataset = full_dataset.skip(train_len) # Creates dataset after skipping over the size print("Total number: ", images_list_len, " Train number: ", train_len, " Val number: ", val_len) # map training images to processing, includes any augmentation train_dataset = train_dataset.map(process_path_train, num_parallel_calls=AUTOTUNE) # Verify the mapping worked for image, label in train_dataset.take(1): print("Image shape: ", image.numpy().shape) print("Label: ", label.numpy()) # Ready to be used for training train_dataset = prepare_for_training(train_dataset) # map validation images to processing val_dataset = val_dataset.map(process_path_val, num_parallel_calls=AUTOTUNE) # Verify the mapping worked for image, label in val_dataset.take(1): print("Image shape: ", image.numpy().shape) print("Label: ", label.numpy()) # Ready to be used for training val_dataset = prepare_for_training(val_dataset) # Test Training image_batch, label_batch = next(iter(train_dataset)) show_batch(image_batch.numpy(), label_batch.numpy()) # Test Validation image_batch, label_batch = next(iter(val_dataset)) show_batch(image_batch.numpy(), label_batch.numpy())
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Build model
- Add and validate a pretrained model as a baseline
# Create any call backs for training...These are the most common. from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger reduce_lr = ReduceLROnPlateau(monitor='loss', patience=2, verbose=1, min_lr=1e-6) earlystopper = EarlyStopping(patience=8, verbose=1) checkpointer = ModelCheckpoint(parms.MODEL_PATH, monitor='val_loss', verbose=1, mode="auto", save_best_only=True) #csv_logger = CSVLogger(self.cvslogfile, append=True, separator=';') #from keras.callbacks import TensorBoard #tensorboard = TensorBoard(log_dir="logs/{}".format(time())) # Create model and compile it from tensorflow.keras.models import Sequential, load_model, Model from tensorflow.keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D, Conv2DTranspose, Concatenate, Activation from tensorflow.keras.losses import binary_crossentropy, categorical_crossentropy from tensorflow.keras.optimizers import Adadelta, Adam, Nadam, SGD ######## #new with transfer learning from tensorflow.keras.applications import MobileNet, imagenet_utils from tensorflow.keras.layers import Dense,GlobalAveragePooling2D actual_MobileNet = tf.keras.applications.mobilenet.MobileNet() def set_train_layers(model, train_layers=20): #since 224x224x3, set the first 20 layers of the network to be non-trainable if train_layers == 0: #set all non-trainable for layer in model.layers: layer.trainable=False else: for layer in model.layers[:train_layers]: layer.trainable=False for layer in model.layers[train_layers:]: layer.trainable=True return model def predict_image(image): image = np.expand_dims(image, axis=0) image = tf.keras.applications.mobilenet.preprocess_input(image) predictions = actual_MobileNet.predict(image) results = imagenet_utils.decode_predictions(predictions) return results #list of decoded imagenet results def build_model(CFG): base_model=MobileNet(weights='imagenet',include_top=False, input_shape=parms.IMAGE_DIM) #imports the mobilenet model and discards the last 1000 neuron layer. x=base_model.output x=GlobalAveragePooling2D()(x) x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results. x=Dense(1024,activation='relu')(x) #dense layer 2 x=Dense(512,activation='relu')(x) #dense layer 3 preds=Dense(parms.NUM_CLASSES, activation=parms.FINAL_ACTIVATION)(x) #final layer model=Model(inputs=base_model.input,outputs=preds) return model def compile_model(CFG, model): model.compile(loss=parms.LOSS, #optimizer=SGD(lr=0.001, momentum=0.9), optimizer=Adam(), metrics=parms.METRICS) return model #test an image just using MobileNet from tensorflow.keras.preprocessing import image img = image.load_img(some_image, target_size=(224, 224)) img_array = image.img_to_array(img) result = predict_image(img_array) result #str(parms.CLASS_NAMES[0])+'/*')) #show the image... from IPython.display import Image Image(filename=some_image) # Show the activation layers, can be trained or initial model (BETA) #model_raw = build_model(CFG) #img_path = os.path.join(parms.TRAIN_PATH, "Cat/2.jpg") #image_show_seq_model_layers_BETA(img_path, model_raw, parms.IMAGE_DIM, # activation_layer_num=0, activation_channel_num=11)
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Train model
# Train model steps_per_epoch = np.ceil(train_len // parms.BATCH_SIZE) # set step sizes based on train & batch validation_steps = np.ceil(val_len // parms.BATCH_SIZE) # set step sizes based on val & batch model = build_model(parms) model = compile_model(parms, model) history = model.fit(train_dataset, validation_data=val_dataset, epochs=parms.EPOCS, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, callbacks=[reduce_lr, earlystopper, checkpointer] # include any callbacks... ) # Plot the training history history_df = pd.DataFrame(history.history) plt.figure() history_df[['loss', 'val_loss']].plot(title="Loss") plt.xlabel('Epocs') plt.ylabel('Loss') history_df[['accuracy', 'val_accuracy']].plot(title="Accuracy") plt.xlabel('Epocs') plt.ylabel('Accuracy') plt.show()
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Validate model's predictions
- Create actual_labels and predict_labels
- Calculate confusion matrix & accuracy
- Display results
#Load saved model from tensorflow.keras.models import load_model def load_saved_model(model_path): model = load_model(model_path) print("loaded: ", model_path) return model model = load_saved_model(parms.MODEL_PATH) # Use model to generate predicted labels and probabilities #labels, predict_labels, predict_probabilities, bad_results = predictions_using_dataset(model, val_dataset, 1, parms.BATCH_SIZE, create_bad_results_list=False) labels, predict_labels, predict_probabilities, bad_results = predictions_using_dataset(model, val_dataset, validation_steps, parms.BATCH_SIZE, create_bad_results_list=False) show_confusion_matrix(labels, predict_labels, parms.CLASS_NAMES) # Graph the results display_prediction_results(labels, predict_labels, predict_probabilities, parms.NUM_CLASSES, parms.CLASS_NAMES) #Create a df from the bad results list, can save as csv or use for further analysis bad_results_df = pd.DataFrame(bad_results, columns =['actual', 'predict', 'prob', 'image']) bad_results_df.head() # default is to not return bad_results, change to include them, create_bad_results_list=True #bad_act, bad_pred, bad_prob, bad_images = zip(*bad_results) # display bad images.... def show_bad_batch(image_batch, bad_act, bad_pred, number_to_show=25): plt.figure(figsize=(10,10)) show_number = number_to_show if len(image_batch) < number_to_show: show_number = len(image_batch) for n in range(show_number): ax = plt.subplot(5,5,n+1) plt.imshow(tf.keras.preprocessing.image.array_to_img(image_batch[n][0])) #s = parms.CLASS_NAMES[bad_pred[n][0]] s = "Act: "+ str(bad_act[n][0]) + " Pred: " + str(bad_pred[n][0]) plt.title(s) plt.axis('off') print(" 0)", parms.CLASS_NAMES[0], " 1)", parms.CLASS_NAMES[1], " 2)", parms.CLASS_NAMES[2], " 3)", parms.CLASS_NAMES[3], " 4)", parms.CLASS_NAMES[4]) #show_bad_batch(bad_images, bad_act, bad_pred)
_____no_output_____
MIT
1-Flowers/FlowersTransfer-TF-Data-V1.ipynb
bo9zbo9z/MachineLearning
Copyright 2021 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Human Pose Classification with MoveNet and TensorFlow Lite

This notebook teaches you how to train a pose classification model using MoveNet and TensorFlow Lite. The result is a new TensorFlow Lite model that accepts the output from the MoveNet model as its input and outputs a pose classification, such as the name of a yoga pose.

The procedure in this notebook consists of 3 parts:
* Part 1: Preprocess the pose classification training data into a CSV file that specifies the landmarks (body keypoints) detected by the MoveNet model, along with the ground truth pose labels.
* Part 2: Build and train a pose classification model that takes the landmark coordinates from the CSV file as input, and outputs the predicted labels.
* Part 3: Convert the pose classification model to TFLite.

By default, this notebook uses an image dataset with labeled yoga poses, but we've also included a section in Part 1 where you can upload your own image dataset of poses.

Preparation

In this section, you'll import the necessary libraries and define several functions to preprocess the training images into a CSV file that contains the landmark coordinates and ground truth labels. Nothing observable happens here, but you can expand the hidden code cells to see the implementation for some of the functions we'll be calling later on.

**If you only want to create the CSV file without knowing all the details, just run this section and proceed to Part 1.**
!pip install -q opencv-python import csv import cv2 import itertools import numpy as np import pandas as pd import os import sys import tempfile import tqdm from matplotlib import pyplot as plt from matplotlib.collections import LineCollection import tensorflow as tf import tensorflow_hub as hub from tensorflow import keras from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Code to run pose estimation using MoveNet
#@title Functions to run pose estimation with MoveNet #@markdown You'll download the MoveNet Thunder model from [TensorFlow Hub](https://www.google.com/url?sa=D&q=https%3A%2F%2Ftfhub.dev%2Fs%3Fq%3Dmovenet), and reuse some inference and visualization logic from the [MoveNet Raspberry Pi (Python)](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/raspberry_pi) sample app to detect landmarks (ear, nose, wrist etc.) from the input images. #@markdown *Note: You should use the most accurate pose estimation model (i.e. MoveNet Thunder) to detect the keypoints and use them to train the pose classification model to achieve the best accuracy. When running inference, you can use a pose estimation model of your choice (e.g. either MoveNet Lightning or Thunder).* # Download model from TF Hub and check out inference code from GitHub !wget -q -O movenet_thunder.tflite https://tfhub.dev/google/lite-model/movenet/singlepose/thunder/tflite/float16/4?lite-format=tflite !git clone https://github.com/tensorflow/examples.git pose_sample_rpi_path = os.path.join(os.getcwd(), 'examples/lite/examples/pose_estimation/raspberry_pi') sys.path.append(pose_sample_rpi_path) # Load MoveNet Thunder model import utils from data import BodyPart from ml import Movenet movenet = Movenet('movenet_thunder') # Define function to run pose estimation using MoveNet Thunder. # You'll apply MoveNet's cropping algorithm and run inference multiple times on # the input image to improve pose estimation accuracy. def detect(input_tensor, inference_count=3): """Runs detection on an input image. Args: input_tensor: A [height, width, 3] Tensor of type tf.float32. Note that height and width can be anything since the image will be immediately resized according to the needs of the model within this function. inference_count: Number of times the model should run repeatly on the same input image to improve detection accuracy. Returns: A Person entity detected by the MoveNet.SinglePose. """ image_height, image_width, channel = input_tensor.shape # Detect pose using the full input image movenet.detect(input_tensor.numpy(), reset_crop_region=True) # Repeatedly using previous detection result to identify the region of # interest and only croping that region to improve detection accuracy for _ in range(inference_count - 1): person = movenet.detect(input_tensor.numpy(), reset_crop_region=False) return person #@title Functions to visualize the pose estimation results. def draw_prediction_on_image( image, person, crop_region=None, close_figure=True, keep_input_size=False): """Draws the keypoint predictions on image. Args: image: An numpy array with shape [height, width, channel] representing the pixel values of the input image. person: A person entity returned from the MoveNet.SinglePose model. close_figure: Whether to close the plt figure after the function returns. keep_input_size: Whether to keep the size of the input image. Returns: An numpy array with shape [out_height, out_width, channel] representing the image overlaid with keypoint predictions. """ # Draw the detection result on top of the image. image_np = utils.visualize(image, [person]) # Plot the image with detection results. 
height, width, channel = image.shape aspect_ratio = float(width) / height fig, ax = plt.subplots(figsize=(12 * aspect_ratio, 12)) im = ax.imshow(image_np) if close_figure: plt.close(fig) if not keep_input_size: image_np = utils.keep_aspect_ratio_resizer(image_np, (512, 512)) return image_np #@title Code to load the images, detect pose landmarks and save them into a CSV file class MoveNetPreprocessor(object): """Helper class to preprocess pose sample images for classification.""" def __init__(self, images_in_folder, images_out_folder, csvs_out_path): """Creates a preprocessor to detection pose from images and save as CSV. Args: images_in_folder: Path to the folder with the input images. It should follow this structure: yoga_poses |__ downdog |______ 00000128.jpg |______ 00000181.bmp |______ ... |__ goddess |______ 00000243.jpg |______ 00000306.jpg |______ ... ... images_out_folder: Path to write the images overlay with detected landmarks. These images are useful when you need to debug accuracy issues. csvs_out_path: Path to write the CSV containing the detected landmark coordinates and label of each image that can be used to train a pose classification model. """ self._images_in_folder = images_in_folder self._images_out_folder = images_out_folder self._csvs_out_path = csvs_out_path self._messages = [] # Create a temp dir to store the pose CSVs per class self._csvs_out_folder_per_class = tempfile.mkdtemp() # Get list of pose classes and print image statistics self._pose_class_names = sorted( [n for n in os.listdir(self._images_in_folder) if not n.startswith('.')] ) def process(self, per_pose_class_limit=None, detection_threshold=0.1): """Preprocesses images in the given folder. Args: per_pose_class_limit: Number of images to load. As preprocessing usually takes time, this parameter can be specified to make the reduce of the dataset for testing. detection_threshold: Only keep images with all landmark confidence score above this threshold. """ # Loop through the classes and preprocess its images for pose_class_name in self._pose_class_names: print('Preprocessing', pose_class_name, file=sys.stderr) # Paths for the pose class. images_in_folder = os.path.join(self._images_in_folder, pose_class_name) images_out_folder = os.path.join(self._images_out_folder, pose_class_name) csv_out_path = os.path.join(self._csvs_out_folder_per_class, pose_class_name + '.csv') if not os.path.exists(images_out_folder): os.makedirs(images_out_folder) # Detect landmarks in each image and write it to a CSV file with open(csv_out_path, 'w') as csv_out_file: csv_out_writer = csv.writer(csv_out_file, delimiter=',', quoting=csv.QUOTE_MINIMAL) # Get list of images image_names = sorted( [n for n in os.listdir(images_in_folder) if not n.startswith('.')]) if per_pose_class_limit is not None: image_names = image_names[:per_pose_class_limit] valid_image_count = 0 # Detect pose landmarks from each image for image_name in tqdm.tqdm(image_names): image_path = os.path.join(images_in_folder, image_name) try: image = tf.io.read_file(image_path) image = tf.io.decode_jpeg(image) except: self._messages.append('Skipped ' + image_path + '. Invalid image.') continue else: image = tf.io.read_file(image_path) image = tf.io.decode_jpeg(image) image_height, image_width, channel = image.shape # Skip images that isn't RGB because Movenet requires RGB images if channel != 3: self._messages.append('Skipped ' + image_path + '. 
Image isn\'t in RGB format.') continue person = detect(image) # Save landmarks if all landmarks were detected min_landmark_score = min( [keypoint.score for keypoint in person.keypoints]) should_keep_image = min_landmark_score >= detection_threshold if not should_keep_image: self._messages.append('Skipped ' + image_path + '. No pose was confidentlly detected.') continue valid_image_count += 1 # Draw the prediction result on top of the image for debugging later output_overlay = draw_prediction_on_image( image.numpy().astype(np.uint8), person, close_figure=True, keep_input_size=True) # Write detection result into an image file output_frame = cv2.cvtColor(output_overlay, cv2.COLOR_RGB2BGR) cv2.imwrite(os.path.join(images_out_folder, image_name), output_frame) # Get landmarks and scale it to the same size as the input image pose_landmarks = np.array( [[keypoint.coordinate.x, keypoint.coordinate.y, keypoint.score] for keypoint in person.keypoints], dtype=np.float32) # Write the landmark coordinates to its per-class CSV file coordinates = pose_landmarks.flatten().astype(np.str).tolist() csv_out_writer.writerow([image_name] + coordinates) if not valid_image_count: raise RuntimeError( 'No valid images found for the "{}" class.' .format(pose_class_name)) # Print the error message collected during preprocessing. print('\n'.join(self._messages)) # Combine all per-class CSVs into a single output file all_landmarks_df = self._all_landmarks_as_dataframe() all_landmarks_df.to_csv(self._csvs_out_path, index=False) def class_names(self): """List of classes found in the training dataset.""" return self._pose_class_names def _all_landmarks_as_dataframe(self): """Merge all per-class CSVs into a single dataframe.""" total_df = None for class_index, class_name in enumerate(self._pose_class_names): csv_out_path = os.path.join(self._csvs_out_folder_per_class, class_name + '.csv') per_class_df = pd.read_csv(csv_out_path, header=None) # Add the labels per_class_df['class_no'] = [class_index]*len(per_class_df) per_class_df['class_name'] = [class_name]*len(per_class_df) # Append the folder name to the filename column (first column) per_class_df[per_class_df.columns[0]] = (os.path.join(class_name, '') + per_class_df[per_class_df.columns[0]].astype(str)) if total_df is None: # For the first class, assign its data to the total dataframe total_df = per_class_df else: # Concatenate each class's data into the total dataframe total_df = pd.concat([total_df, per_class_df], axis=0) list_name = [[bodypart.name + '_x', bodypart.name + '_y', bodypart.name + '_score'] for bodypart in BodyPart] header_name = [] for columns_name in list_name: header_name += columns_name header_name = ['file_name'] + header_name header_map = {total_df.columns[i]: header_name[i] for i in range(len(header_name))} total_df.rename(header_map, axis=1, inplace=True) return total_df #@title (Optional) Code snippet to try out the Movenet pose estimation logic #@markdown You can download an image from the internet, run the pose estimation logic on it and plot the detected landmarks on top of the input image. #@markdown *Note: This code snippet is also useful for debugging when you encounter an image with bad pose classification accuracy. 
You can run pose estimation on the image and see if the detected landmarks look correct or not before investigating the pose classification logic.* test_image_url = "https://cdn.pixabay.com/photo/2017/03/03/17/30/yoga-2114512_960_720.jpg" #@param {type:"string"} !wget -O /tmp/image.jpeg {test_image_url} if len(test_image_url): image = tf.io.read_file('/tmp/image.jpeg') image = tf.io.decode_jpeg(image) person = detect(image) _ = draw_prediction_on_image(image.numpy(), person, crop_region=None, close_figure=False, keep_input_size=True)
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Part 1: Preprocess the input images

Because the input for our pose classifier is the *output* landmarks from the MoveNet model, we need to generate our training dataset by running labeled images through MoveNet and then capturing all the landmark data and ground truth labels into a CSV file.

The dataset we've provided for this tutorial is a CG-generated yoga pose dataset. It contains images of multiple CG-generated models doing 5 different yoga poses. The directory is already split into a `train` dataset and a `test` dataset.

So in this section, we'll download the yoga dataset and run it through MoveNet so we can capture all the landmarks into a CSV file... **However, it takes about 15 minutes to feed our yoga dataset to MoveNet and generate this CSV file**. So as an alternative, you can download a pre-existing CSV file for the yoga dataset by setting the `is_skip_step_1` parameter below to **True**. That way, you'll skip this step and instead download the same CSV file that will be created in this preprocessing step.

On the other hand, if you want to train the pose classifier with your own image dataset, you need to upload your images and run this preprocessing step (leave `is_skip_step_1` **False**)—follow the instructions below to upload your own pose dataset.
is_skip_step_1 = False #@param ["False", "True"] {type:"raw"}
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
(Optional) Upload your own pose dataset
use_custom_dataset = False  #@param ["False", "True"] {type:"raw"}
dataset_is_split = False  #@param ["False", "True"] {type:"raw"}
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
If you want to train the pose classifier with your own labeled poses (they can be any poses, not just yoga poses), follow these steps:

1. Set the above `use_custom_dataset` option to **True**.
2. Prepare an archive file (ZIP, TAR, or other) that includes a folder with your images dataset. The folder must include sorted images of your poses as follows. If you've already split your dataset into train and test sets, then set `dataset_is_split` to **True**. That is, your images folder must include "train" and "test" directories like this:
   ```
   yoga_poses/
   |__ train/
       |__ downdog/
           |______ 00000128.jpg
           |______ ...
   |__ test/
       |__ downdog/
           |______ 00000181.jpg
           |______ ...
   ```
   Or, if your dataset is NOT split yet, then set `dataset_is_split` to **False** and we'll split it up based on a specified split fraction. That is, your uploaded images folder should look like this:
   ```
   yoga_poses/
   |__ downdog/
       |______ 00000128.jpg
       |______ 00000181.jpg
       |______ ...
   |__ goddess/
       |______ 00000243.jpg
       |______ 00000306.jpg
       |______ ...
   ```
3. Click the **Files** tab on the left (folder icon) and then click **Upload to session storage** (file icon).
4. Select your archive file and wait until it finishes uploading before you proceed.
5. Edit the following code block to specify the name of your archive file and images directory. (By default, we expect a ZIP file, so you'll need to also modify that part if your archive is another format.)
6. Now run the rest of the notebook.
#@markdown Be sure you run this cell. It's hiding the `split_into_train_test()` function that's called in the next code block. import os import random import shutil def split_into_train_test(images_origin, images_dest, test_split): """Splits a directory of sorted images into training and test sets. Args: images_origin: Path to the directory with your images. This directory must include subdirectories for each of your labeled classes. For example: yoga_poses/ |__ downdog/ |______ 00000128.jpg |______ 00000181.jpg |______ ... |__ goddess/ |______ 00000243.jpg |______ 00000306.jpg |______ ... ... images_dest: Path to a directory where you want the split dataset to be saved. The results looks like this: split_yoga_poses/ |__ train/ |__ downdog/ |______ 00000128.jpg |______ ... |__ test/ |__ downdog/ |______ 00000181.jpg |______ ... test_split: Fraction of data to reserve for test (float between 0 and 1). """ _, dirs, _ = next(os.walk(images_origin)) TRAIN_DIR = os.path.join(images_dest, 'train') TEST_DIR = os.path.join(images_dest, 'test') os.makedirs(TRAIN_DIR, exist_ok=True) os.makedirs(TEST_DIR, exist_ok=True) for dir in dirs: # Get all filenames for this dir, filtered by filetype filenames = os.listdir(os.path.join(images_origin, dir)) filenames = [os.path.join(images_origin, dir, f) for f in filenames if ( f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.bmp'))] # Shuffle the files, deterministically filenames.sort() random.seed(42) random.shuffle(filenames) # Divide them into train/test dirs os.makedirs(os.path.join(TEST_DIR, dir), exist_ok=True) os.makedirs(os.path.join(TRAIN_DIR, dir), exist_ok=True) test_count = int(len(filenames) * test_split) for i, file in enumerate(filenames): if i < test_count: destination = os.path.join(TEST_DIR, dir, os.path.split(file)[1]) else: destination = os.path.join(TRAIN_DIR, dir, os.path.split(file)[1]) shutil.copyfile(file, destination) print(f'Moved {test_count} of {len(filenames)} from class "{dir}" into test.') print(f'Your split dataset is in "{images_dest}"') if use_custom_dataset: # ATTENTION: # You must edit these two lines to match your archive and images folder name: # !tar -xf YOUR_DATASET_ARCHIVE_NAME.tar !unzip -q YOUR_DATASET_ARCHIVE_NAME.zip dataset_in = 'YOUR_DATASET_DIR_NAME' # You can leave the rest alone: if not os.path.isdir(dataset_in): raise Exception("dataset_in is not a valid directory") if dataset_is_split: IMAGES_ROOT = dataset_in else: dataset_out = 'split_' + dataset_in split_into_train_test(dataset_in, dataset_out, test_split=0.2) IMAGES_ROOT = dataset_out
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
**Note:** If you're using `split_into_train_test()` to split the dataset, it expects all images to be PNG, JPEG, or BMP—it ignores other file types.

Download the yoga dataset
if not is_skip_step_1 and not use_custom_dataset:
  !wget -O yoga_poses.zip http://download.tensorflow.org/data/pose_classification/yoga_poses.zip
  !unzip -q yoga_poses.zip -d yoga_cg
  IMAGES_ROOT = "yoga_cg"
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Preprocess the `TRAIN` dataset
if not is_skip_step_1:
  images_in_train_folder = os.path.join(IMAGES_ROOT, 'train')
  images_out_train_folder = 'poses_images_out_train'
  csvs_out_train_path = 'train_data.csv'

  preprocessor = MoveNetPreprocessor(
      images_in_folder=images_in_train_folder,
      images_out_folder=images_out_train_folder,
      csvs_out_path=csvs_out_train_path,
  )
  preprocessor.process(per_pose_class_limit=None)
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Preprocess the `TEST` dataset
if not is_skip_step_1:
  images_in_test_folder = os.path.join(IMAGES_ROOT, 'test')
  images_out_test_folder = 'poses_images_out_test'
  csvs_out_test_path = 'test_data.csv'

  preprocessor = MoveNetPreprocessor(
      images_in_folder=images_in_test_folder,
      images_out_folder=images_out_test_folder,
      csvs_out_path=csvs_out_test_path,
  )
  preprocessor.process(per_pose_class_limit=None)
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Part 2: Train a pose classification model that takes the landmark coordinates as input and outputs the predicted labels

You'll build a TensorFlow model that takes the landmark coordinates and predicts the pose class that the person in the input image performs. The model consists of two submodels:
* Submodel 1 calculates a pose embedding (a.k.a. feature vector) from the detected landmark coordinates.
* Submodel 2 feeds the pose embedding through several `Dense` layers to predict the pose class.

You'll then train the model on the dataset that was preprocessed in Part 1.

(Optional) Download the preprocessed dataset if you didn't run Part 1
# Download the preprocessed CSV files which are the same as the output of step 1
if is_skip_step_1:
  !wget -O train_data.csv http://download.tensorflow.org/data/pose_classification/yoga_train_data.csv
  !wget -O test_data.csv http://download.tensorflow.org/data/pose_classification/yoga_test_data.csv

  csvs_out_train_path = 'train_data.csv'
  csvs_out_test_path = 'test_data.csv'
  is_skipped_step_1 = True
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Load the preprocessed CSVs into `TRAIN` and `TEST` datasets.
def load_pose_landmarks(csv_path):
  """Loads a CSV created by MoveNetPreprocessor.

  Returns:
    X: Detected landmark coordinates and scores of shape (N, 17 * 3)
    y: Ground truth labels of shape (N, label_count)
    classes: The list of all class names found in the dataset
    dataframe: The CSV loaded as a Pandas dataframe, with the features (X) and
      ground truth labels (y) to use later to train a pose classification model.
  """
  # Load the CSV file
  dataframe = pd.read_csv(csv_path)
  df_to_process = dataframe.copy()

  # Drop the file_name column as you don't need it during training.
  df_to_process.drop(columns=['file_name'], inplace=True)

  # Extract the list of class names
  classes = df_to_process.pop('class_name').unique()

  # Extract the labels
  y = df_to_process.pop('class_no')

  # Convert the input features and labels into the correct format for training.
  X = df_to_process.astype('float64')
  y = keras.utils.to_categorical(y)

  return X, y, classes, dataframe
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Load and split the original `TRAIN` dataset into `TRAIN` (85% of the data) and `VALIDATE` (the remaining 15%).
# Load the train data
X, y, class_names, _ = load_pose_landmarks(csvs_out_train_path)

# Split training data (X, y) into (X_train, y_train) and (X_val, y_val)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.15)

# Load the test data
X_test, y_test, _, df_test = load_pose_landmarks(csvs_out_test_path)
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Define functions to convert the pose landmarks to a pose embedding (a.k.a. feature vector) for pose classification

Next, convert the landmark coordinates to a feature vector by:
1. Moving the pose center to the origin.
2. Scaling the pose so that the pose size becomes 1.
3. Flattening these coordinates into a feature vector.

Then use this feature vector to train a neural-network based pose classifier.
def get_center_point(landmarks, left_bodypart, right_bodypart): """Calculates the center point of the two given landmarks.""" left = tf.gather(landmarks, left_bodypart.value, axis=1) right = tf.gather(landmarks, right_bodypart.value, axis=1) center = left * 0.5 + right * 0.5 return center def get_pose_size(landmarks, torso_size_multiplier=2.5): """Calculates pose size. It is the maximum of two values: * Torso size multiplied by `torso_size_multiplier` * Maximum distance from pose center to any pose landmark """ # Hips center hips_center = get_center_point(landmarks, BodyPart.LEFT_HIP, BodyPart.RIGHT_HIP) # Shoulders center shoulders_center = get_center_point(landmarks, BodyPart.LEFT_SHOULDER, BodyPart.RIGHT_SHOULDER) # Torso size as the minimum body size torso_size = tf.linalg.norm(shoulders_center - hips_center) # Pose center pose_center_new = get_center_point(landmarks, BodyPart.LEFT_HIP, BodyPart.RIGHT_HIP) pose_center_new = tf.expand_dims(pose_center_new, axis=1) # Broadcast the pose center to the same size as the landmark vector to # perform substraction pose_center_new = tf.broadcast_to(pose_center_new, [tf.size(landmarks) // (17*2), 17, 2]) # Dist to pose center d = tf.gather(landmarks - pose_center_new, 0, axis=0, name="dist_to_pose_center") # Max dist to pose center max_dist = tf.reduce_max(tf.linalg.norm(d, axis=0)) # Normalize scale pose_size = tf.maximum(torso_size * torso_size_multiplier, max_dist) return pose_size def normalize_pose_landmarks(landmarks): """Normalizes the landmarks translation by moving the pose center to (0,0) and scaling it to a constant pose size. """ # Move landmarks so that the pose center becomes (0,0) pose_center = get_center_point(landmarks, BodyPart.LEFT_HIP, BodyPart.RIGHT_HIP) pose_center = tf.expand_dims(pose_center, axis=1) # Broadcast the pose center to the same size as the landmark vector to perform # substraction pose_center = tf.broadcast_to(pose_center, [tf.size(landmarks) // (17*2), 17, 2]) landmarks = landmarks - pose_center # Scale the landmarks to a constant pose size pose_size = get_pose_size(landmarks) landmarks /= pose_size return landmarks def landmarks_to_embedding(landmarks_and_scores): """Converts the input landmarks into a pose embedding.""" # Reshape the flat input into a matrix with shape=(17, 3) reshaped_inputs = keras.layers.Reshape((17, 3))(landmarks_and_scores) # Normalize landmarks 2D landmarks = normalize_pose_landmarks(reshaped_inputs[:, :, :2]) # Flatten the normalized landmark coordinates into a vector embedding = keras.layers.Flatten()(landmarks) return embedding
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Define a Keras model for pose classification

Our Keras model takes the detected pose landmarks, then calculates the pose embedding and predicts the pose class.
# Define the model inputs = tf.keras.Input(shape=(51)) embedding = landmarks_to_embedding(inputs) layer = keras.layers.Dense(128, activation=tf.nn.relu6)(embedding) layer = keras.layers.Dropout(0.5)(layer) layer = keras.layers.Dense(64, activation=tf.nn.relu6)(layer) layer = keras.layers.Dropout(0.5)(layer) outputs = keras.layers.Dense(5, activation="softmax")(layer) model = keras.Model(inputs, outputs) model.summary() model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) # Add a checkpoint callback to store the checkpoint that has the highest # validation accuracy. checkpoint_path = "weights.best.hdf5" checkpoint = keras.callbacks.ModelCheckpoint(checkpoint_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max') earlystopping = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20) # Start training history = model.fit(X_train, y_train, epochs=200, batch_size=16, validation_data=(X_val, y_val), callbacks=[checkpoint, earlystopping]) # Visualize the training history to see whether you're overfitting. plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('Model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['TRAIN', 'VAL'], loc='lower right') plt.show() # Evaluate the model using the TEST dataset loss, accuracy = model.evaluate(X_test, y_test)
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Draw the confusion matrix to better understand the model performance
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """Plots the confusion matrix.""" if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=55) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout() # Classify pose in the TEST dataset using the trained model y_pred = model.predict(X_test) # Convert the prediction result to class name y_pred_label = [class_names[i] for i in np.argmax(y_pred, axis=1)] y_true_label = [class_names[i] for i in np.argmax(y_test, axis=1)] # Plot the confusion matrix cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1)) plot_confusion_matrix(cm, class_names, title ='Confusion Matrix of Pose Classification Model') # Print the classification report print('\nClassification Report:\n', classification_report(y_true_label, y_pred_label))
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
(Optional) Investigate incorrect predictions

You can look at the poses from the `TEST` dataset that were incorrectly predicted to see whether the model accuracy can be improved.

Note: This only works if you have run step 1 because you need the pose image files on your local machine to display them.
if is_skip_step_1: raise RuntimeError('You must have run step 1 to run this cell.') # If step 1 was skipped, skip this step. IMAGE_PER_ROW = 3 MAX_NO_OF_IMAGE_TO_PLOT = 30 # Extract the list of incorrectly predicted poses false_predict = [id_in_df for id_in_df in range(len(y_test)) \ if y_pred_label[id_in_df] != y_true_label[id_in_df]] if len(false_predict) > MAX_NO_OF_IMAGE_TO_PLOT: false_predict = false_predict[:MAX_NO_OF_IMAGE_TO_PLOT] # Plot the incorrectly predicted images row_count = len(false_predict) // IMAGE_PER_ROW + 1 fig = plt.figure(figsize=(10 * IMAGE_PER_ROW, 10 * row_count)) for i, id_in_df in enumerate(false_predict): ax = fig.add_subplot(row_count, IMAGE_PER_ROW, i + 1) image_path = os.path.join(images_out_test_folder, df_test.iloc[id_in_df]['file_name']) image = cv2.imread(image_path) plt.title("Predict: %s; Actual: %s" % (y_pred_label[id_in_df], y_true_label[id_in_df])) plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) plt.show()
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Part 3: Convert the pose classification model to TensorFlow Lite

You'll convert the Keras pose classification model to the TensorFlow Lite format so that you can deploy it to mobile apps, web browsers, and IoT devices. When converting the model, you'll apply [dynamic range quantization](https://www.tensorflow.org/lite/performance/post_training_quant) to reduce the pose classification TensorFlow Lite model size by about 4 times with insignificant accuracy loss.

Note: TensorFlow Lite supports multiple quantization schemes. See the [documentation](https://www.tensorflow.org/lite/performance/model_optimization) if you are interested in learning more.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()

print('Model size: %dKB' % (len(tflite_model) / 1024))

with open('pose_classifier.tflite', 'wb') as f:
  f.write(tflite_model)
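Dynamic range quantization (above) needs no calibration data. If you wanted one of the other schemes mentioned in the linked docs, a hedged sketch of post-training integer quantization would add a representative dataset drawn from the training features; the generator below is illustrative, assumes `X_train` from Part 2 is available, and the sample count of 100 is arbitrary:

def representative_dataset():
  # yield a few training samples so the converter can calibrate activation ranges
  for i in range(100):
    yield [X_train.values[i:i + 1].astype('float32')]

int8_converter = tf.lite.TFLiteConverter.from_keras_model(model)
int8_converter.optimizations = [tf.lite.Optimize.DEFAULT]
int8_converter.representative_dataset = representative_dataset
tflite_int8_model = int8_converter.convert()
print('Int8 model size: %dKB' % (len(tflite_int8_model) / 1024))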
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Then you'll write the label file, which contains the mapping from class indexes to human-readable class names.
with open('pose_labels.txt', 'w') as f: f.write('\n'.join(class_names))
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
As you've applied quantization to reduce the model size, let's evaluate the quantized TFLite model to check whether the accuracy drop is acceptable.
def evaluate_model(interpreter, X, y_true): """Evaluates the given TFLite model and return its accuracy.""" input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # Run predictions on all given poses. y_pred = [] for i in range(len(y_true)): # Pre-processing: add batch dimension and convert to float32 to match with # the model's input data format. test_image = X[i: i + 1].astype('float32') interpreter.set_tensor(input_index, test_image) # Run inference. interpreter.invoke() # Post-processing: remove batch dimension and find the class with highest # probability. output = interpreter.tensor(output_index) predicted_label = np.argmax(output()[0]) y_pred.append(predicted_label) # Compare prediction results with ground truth labels to calculate accuracy. y_pred = keras.utils.to_categorical(y_pred) return accuracy_score(y_true, y_pred) # Evaluate the accuracy of the converted TFLite model classifier_interpreter = tf.lite.Interpreter(model_content=tflite_model) classifier_interpreter.allocate_tensors() print('Accuracy of TFLite model: %s' % evaluate_model(classifier_interpreter, X_test, y_test))
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
Now you can download the TFLite model (`pose_classifier.tflite`) and the label file (`pose_labels.txt`) to classify custom poses. See the [Android](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/android) and [Python/Raspberry Pi](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/raspberry_pi) sample app for an end-to-end example of how to use the TFLite pose classification model.
!zip pose_classifier.zip pose_labels.txt pose_classifier.tflite # Download the zip archive if running on Colab. try: from google.colab import files files.download('pose_classifier.zip') except: pass
_____no_output_____
Apache-2.0
site/en-snapshot/lite/tutorials/pose_classification.ipynb
Icecoffee2500/docs-l10n
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Load the Colab sample data: the larger file is the training split, the smaller one is the test split
train = pd.read_csv('/content/sample_data/california_housing_train.csv')
test = pd.read_csv('/content/sample_data/california_housing_test.csv')
train.head()
test.head()
train.describe()
train.hist(figsize=(15,13), grid=False, bins=50)
plt.show()
correlation = train.corr()
plt.figure(figsize=(10,10))
sns.heatmap(correlation , annot=True)
plt.show()
_____no_output_____
MIT
california_housing.ipynb
crazrycoin/Open-source
In this tutorial, we will learn how to plot the variable "Boundary layer height" for a particular output of a WRF model. Reference: https://wrf-python.readthedocs.io/en/latest/index.html 1. Import libraries
# Loading necessary libraries import numpy as np from netCDF4 import Dataset import matplotlib.pyplot as plt from matplotlib.cm import get_cmap import cartopy.crs as crs from cartopy.feature import NaturalEarthFeature from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords, interplevel)
_____no_output_____
MIT
notebook/06 WRF Python - Boundary layer height plot.ipynb
sonnymetvn/Basic-Python-for-Meteorology
2. Download data
# specify where is the location of the data path_in = "data/" path_out = "./" # Open the NetCDF file ncfile = Dataset(path_in + 'wrfout_d01_2016-05-09_00^%00^%00')
_____no_output_____
MIT
notebook/06 WRF Python - Boundary layer height plot.ipynb
sonnymetvn/Basic-Python-for-Meteorology
3. Take out the variables
# Get the boundary layer height PBLH = getvar(ncfile, "PBLH") print(PBLH.dims)
('south_north', 'west_east')
MIT
notebook/06 WRF Python - Boundary layer height plot.ipynb
sonnymetvn/Basic-Python-for-Meteorology
4. Plotting
PBLH.plot()
_____no_output_____
MIT
notebook/06 WRF Python - Boundary layer height plot.ipynb
sonnymetvn/Basic-Python-for-Meteorology
This notebook shows you how to create and query a table or DataFrame loaded from data stored in Azure Blob storage.
from pyspark.sql.functions import lit from pyspark.sql.types import BinaryType,StringType from pyspark.sql import SparkSession
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Step 1: Set the data location and typeThere are two ways to access Azure Blob storage: account keys and shared access signatures (SAS). To get started, we need to set the location and type of the file.
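This notebook reads from a local sample folder (next cell), but for reference, here is a hedged sketch of both access methods. The storage account, container, key, and SAS token below are placeholders, and a `SparkSession` named `spark` is assumed to be available (one is created in Step 2).

```python
# Placeholder values: substitute your own storage account, container and credentials.
storage_account = "mystorageaccount"
container = "mycontainer"

# Option 1: account key
spark.conf.set(
    f"fs.azure.account.key.{storage_account}.blob.core.windows.net",
    "<account-access-key>")

# Option 2: shared access signature (SAS), scoped to a single container
spark.conf.set(
    f"fs.azure.sas.{container}.{storage_account}.blob.core.windows.net",
    "<sas-token>")

# With either option configured, data can be addressed through the wasbs:// scheme
file_location = f"wasbs://{container}@{storage_account}.blob.core.windows.net/256_sampledata/"
```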
file_location = "256_sampledata/"
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Step 2: Read the dataNow that we have specified our file metadata, we can create a DataFrame. Notice that we use *options* to restrict loading to `.jpg` files (`pathGlobFilter`) and to search directories recursively (`recursiveFileLookup`); we could also explicitly set a schema if we had one already. First, let's create a DataFrame in Python.
! ls -l "256_sampledata" # start Spark session: spark = SparkSession \ .builder \ .appName("Marhselling Image data") \ .config("spark.memory.offHeap.enabled",True) \ .config("spark.memory.offHeap.size","30g")\ .getOrCreate() spark.sql("set spark.sql.files.ignoreCorruptFiles=true") df = spark.read.format("binaryFile") \ .option("pathGlobFilter", "*.jpg") \ .option("recursiveFileLookup", "true") \ .load(file_location) df.printSchema() # Try image file type to learn about the schema: # we are NOT using this DF. image_df = spark.read.format("image") \ .option("pathGlobFilter", "*.jpg") \ .option("recursiveFileLookup", "true") \ .load(file_location) image_df.printSchema() image_df = None df.show(5) df.count()
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
preprocess 1. Extract labels 2. Extract size 3. Transform labels to index Regex expression Notice that every file path can be different; you will need to tweak the actual regex expression to fit your file paths. For that, take a look at an example of the file path and experiment with a [regex calculator](https://regexr.com/).
df.select("path").show(5, truncate=False) import io import numpy as np import pandas as pd import uuid from pyspark.sql.functions import col, pandas_udf, regexp_extract from PIL import Image def extract_label(path_col): """Extract label category number from file path using built-in sql function""" #([^/]+) return regexp_extract(path_col,"256_sampledata/([^/]+)",1) def extract_size(content): """Extract images size from its raw content""" image = Image.open(io.BytesIO(content)) return image.size @pandas_udf("width: int, height: int") def extract_size_udf(content_series): sizes = content_series.apply(extract_size) return pd.DataFrame(list(sizes)) images_w_label_size = df.select( col("path"), extract_label(col("path")).alias("label"), extract_size_udf(col("content")).alias("size"), col("content")) images_w_label_size.show(5)
+--------------------+-------------+------------+--------------------+ | path| label| size| content| +--------------------+-------------+------------+--------------------+ |file:/home/jovyan...| 249.yo-yo|{1500, 1500}|[FF D8 FF E0 00 1...| |file:/home/jovyan...|196.spaghetti| {630, 537}|[FF D8 FF E0 00 1...| |file:/home/jovyan...| 249.yo-yo|{1792, 1200}|[FF D8 FF E0 00 1...| |file:/home/jovyan...| 249.yo-yo|{2048, 1536}|[FF D8 FF E0 00 1...| |file:/home/jovyan...|196.spaghetti| {696, 806}|[FF D8 FF E0 00 1...| +--------------------+-------------+------------+--------------------+ only showing top 5 rows
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Transform label to index 1st way - the python way
labels = images_w_label_size.select(col("label")).distinct().collect() label_to_idx = {label: index for index,(label,) in enumerate(sorted(labels))} num_classes = len(label_to_idx) @pandas_udf("long") def get_label_idx(labels): return labels.map(lambda label: label_to_idx[label]) labels_idx = images_w_label_size.select( col("label"), get_label_idx(col("label")).alias("label_index"), col("content"), col("path"), col("size")) labels_idx.show(5)
+-------------+-----------+--------------------+--------------------+------------+ | label|label_index| content| path| size| +-------------+-----------+--------------------+--------------------+------------+ | 249.yo-yo| 3|[FF D8 FF E0 00 1...|file:/home/jovyan...|{1500, 1500}| |196.spaghetti| 0|[FF D8 FF E0 00 1...|file:/home/jovyan...| {630, 537}| | 249.yo-yo| 3|[FF D8 FF E0 00 1...|file:/home/jovyan...|{1792, 1200}| | 249.yo-yo| 3|[FF D8 FF E0 00 1...|file:/home/jovyan...|{2048, 1536}| |196.spaghetti| 0|[FF D8 FF E0 00 1...|file:/home/jovyan...| {696, 806}| +-------------+-----------+--------------------+--------------------+------------+ only showing top 5 rows
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
2nd way - the mllib way
from pyspark.ml.feature import StringIndexer indexer = StringIndexer(inputCol="label", outputCol="label_index") indexed = indexer.fit(images_w_label_size).transform(images_w_label_size) indexed.show(10) indexed.select("label_index").distinct().collect()
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
3rd way - from the label itself
def extract_index_from_label(label): """Extract index from label""" return regexp_extract(label,"^([^.]+)",1) labels_idx = images_w_label_size.select( col("label"), extract_index_from_label(col("label")).alias("label_index"), col("content"), col("path"), col("size")) labels_idx.show(5,truncate=False) images_w_label_size = None df = indexed labels_idx = None
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Step 3: Feature Engineering Extracting greyscale images. Greyscale is used as an example of a feature we might want to extract.
df.printSchema()
root |-- path: string (nullable = true) |-- label: string (nullable = true) |-- size: struct (nullable = true) | |-- width: integer (nullable = true) | |-- height: integer (nullable = true) |-- content: binary (nullable = true) |-- label_index: double (nullable = false)
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Calculate the average image size for each category: 1. flatten the struct column into two columns 2. calculate the average size for each category 3. resize according to the average.
# 1st step - flatten the struct
flattened = df.withColumn('width', col('size')['width'])
flattened = flattened.withColumn('height', col('size')['height'])
flattened.select('width','height').show(3, truncate = False)

# 2 - calculate average size for category
import pandas as pd
from pyspark.sql.functions import pandas_udf
from pyspark.sql import Window

@pandas_udf("int")
def pandas_mean(size: pd.Series) -> (int):
    # note: despite the name, this UDF returns the sum of the series;
    # divide by the group count if you need the actual mean
    return size.sum()

flattened.select(pandas_mean(flattened['width'])).show()
flattened.groupby("label").agg(pandas_mean(flattened['width'])).show()
flattened.select(pandas_mean(flattened['width']).over(Window.partitionBy('label'))).show()

flattened.select(pandas_mean(flattened['height'])).show()
flattened.groupby("label").agg(pandas_mean(flattened['height'])).show()
flattened.select(pandas_mean(flattened['height']).over(Window.partitionBy('label'))).show()
+------------------+ |pandas_mean(width)| +------------------+ | 165992| +------------------+ +-------------+------------------+ | label|pandas_mean(width)| +-------------+------------------+ |196.spaghetti| 39019| | 249.yo-yo| 40944| | 234.tweezer| 34513| | 212.teapot| 51516| +-------------+------------------+ +----------------------------------------------------------------+ |pandas_mean(width) OVER (PARTITION BY label unspecifiedframe$())| +----------------------------------------------------------------+ | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| | 39019| +----------------------------------------------------------------+ only showing top 20 rows +-------------------+ |pandas_mean(height)| +-------------------+ | 143843| +-------------------+ +-------------+-------------------+ | label|pandas_mean(height)| +-------------+-------------------+ |196.spaghetti| 33160| | 249.yo-yo| 37326| | 234.tweezer| 27628| | 212.teapot| 45729| +-------------+-------------------+ +-----------------------------------------------------------------+ |pandas_mean(height) OVER (PARTITION BY label unspecifiedframe$())| +-----------------------------------------------------------------+ | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| | 33160| +-----------------------------------------------------------------+ only showing top 20 rows
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Extract greyscale
# Sample python native function that can do additional processing - expects pandas df as input and returns pandas df as output. def add_grayscale_img(input_df): # Set up return frame. In this case I'll have a row per passed in row. You could be aggregating down to a single image, slicing # out columns,or just about anything, here. For this case, I am simply going to return the input_df with some extra columns. input_df['grayscale_image'] = input_df.content.apply(lambda image: get_image_bytes(Image.open(io.BytesIO(image)).convert('L'))) input_df['grayscale_format'] = "png" # Since this is a pandas df, this will assigne png to all rows return input_df def get_image_bytes(image): img_bytes = io.BytesIO() image.save(img_bytes,format="png") return img_bytes.getvalue() # Setup the return schema. Add blank columns to match the schema expected after applying the transformation function. Makes the schema definition easy in the function invocation. rtn_schema = (df.select('content','label','path') .withColumn('grayscale_image', lit(None).cast(BinaryType())) .withColumn('grayscale_format', lit(None).cast(StringType())) ) # Reduce df down to data used in the function, the groupBy, and the re-join key respectively. This could include other features as used by your pandas function limited_df = df.select('label','content','path') # Returns spark dataframe with transformations applied in parallel for each 'group' augmented_df = limited_df.groupBy('label').applyInPandas(add_grayscale_img, schema=rtn_schema.schema) # re-join to the full dataset using leftouter in case the image transform needed to skip some rows output_df = df.join(augmented_df.select('path','grayscale_image'),['path'],"leftouter")
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Test on small data
pd_df = limited_df.limit(5).toPandas() print(pd_df.columns) limited_df = None
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Make sure function works correctly
# Some testing code test_df = pd_df.copy() add_grayscale_img(test_df) print(test_df['grayscale_image']) from PIL import ImageFilter # Sample python native function that can do additional processing - expects pandas df as input and returns pandas df as output. def add_laplas(input_df): # Set up return frame. In this case I'll have a row per passed in row. You could be aggregating down to a single image, slicing # out columns,or just about anything, here. For this case, I am simply going to return the input_df with some extra columns. input_df['edges_image'] = input_df.grayscale_image.apply(lambda image: get_image_bytes(Image.open(io.BytesIO(image)).filter(ImageFilter.FIND_EDGES) )) return input_df # Some testing code add_laplas(test_df) print(test_df['edges_image']) print(test_df['path'][4]) test_df print(test_df.columns) # display one image import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np color_image = mpimg.imread(io.BytesIO(test_df.loc[1,'content']), format='jpg') image = mpimg.imread(io.BytesIO(test_df.loc[1,'grayscale_image']), format='png') edges_image = mpimg.imread(io.BytesIO(test_df.loc[1,'edges_image']), format='png') print('color dimensions = {}'.format(color_image.shape)) print('grayscale dimensions = {}'.format(image.shape)) row_count = test_df.count()[0] plt.figure(figsize=(8,20)) for label_index,row in test_df.iterrows(): (_,content,_,grayscale,_,_) = row color_image = mpimg.imread(io.BytesIO(content), format='jpg') image = mpimg.imread(io.BytesIO(grayscale), format='png') plt.subplot(row_count,2,label_index*2+1) plt.imshow(color_image) plt.subplot(row_count,2,label_index*2+2) plt.imshow(image,cmap='gray') #laplas kernel convolution plt.figure(figsize=(8,20)) for label_index,row in test_df.iterrows(): (_,content,_,grayscale,_,edges_image) = row edges_image = image = mpimg.imread(io.BytesIO(edges_image), format='png') plt.subplot(row_count,1,label_index*1+1) plt.imshow(edges_image,cmap='gray')
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Full Dataset
output_df.show(2, truncate=True) output_df.printSchema()
root |-- path: string (nullable = true) |-- label: string (nullable = true) |-- size: struct (nullable = true) | |-- width: integer (nullable = true) | |-- height: integer (nullable = true) |-- content: binary (nullable = true) |-- label_index: double (nullable = false) |-- grayscale_image: binary (nullable = true)
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Step 5: scale the image From the size column, we notice that Caltech-256 image sizes vary widely. To proceed, we need to scale the images to a uniform size. For that we will use Spark UDFs with PIL. This is a must-do part of normalizing and preprocessing image data.
from pyspark.sql.types import BinaryType, IntegerType from pyspark.sql.functions import udf img_size = 224 def scale_image(image_bytes): try: image = Image.open(io.BytesIO(image_bytes)).resize([img_size, img_size]) return image.tobytes() except: return None array = output_df.select("content").take(1) tmp_scale=scale_image(array[0].content) len(tmp_scale) from pyspark.sql.functions import udf scale_image_udf = udf(scale_image, BinaryType()) #image_df = output_df.select("label_index", scale_image_udf("content").alias("content")) image_df = output_df.select("label_index", scale_image_udf(col("content")).alias("image")) image_df.printSchema() image_df = image_df.select("label_index","image",col("image").alias("content")) image_df.printSchema() image_df =image_df.drop("image") image_df.printSchema()
root |-- label_index: double (nullable = false) |-- content: binary (nullable = true)
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Step 4: Save and avoid the small files problem Save the image data into a file format where you can query and process it at scale. Save the dataset with the greyscale channel, then repartition and save to **parquet**.
# in case you are running in a distributed environment with a large dataset, it's a good idea to partition the data

# save the data:
save_path_augmented = "images_data/silver/augmented"
# Images data is already compressed so we turn off parquet compression
compression = spark.conf.get("spark.sql.parquet.compression.codec")
spark.conf.set("spark.sql.parquet.compression.codec", "uncompressed")

output_df.write.mode("overwrite").parquet(save_path_augmented)

save_path_filtered = "images_data/silver/filtered"
# parquet.block.size is for Petastorm, later
image_df.repartition(2).write.mode("overwrite").option("parquet.block.size", 1024 * 1024).parquet(save_path_filtered)

spark.conf.set("spark.sql.parquet.compression.codec", compression)
_____no_output_____
Apache-2.0
notebooks/ch04-05_Caltech256 - Loading and process Images Data.ipynb
adipolak/ml-with-apache-spark
Histogram* Create a histogram to visualize the most common salary ranges for employees.
# x_axis = sal_title_group_clean['salary']
# y_axis =

plt.hist(emp_title_merged['salary'], color="red")
plt.title('Salary Ranges for Employees')
plt.xlabel('Salary Range ($)')
plt.ylabel('Employee Count')
plt.grid(alpha=0.5)

plt.tight_layout()  # apply the layout adjustment before rendering the figure
plt.show()
_____no_output_____
ADSL
EmployeeSQL/Working files/SQL BONUS.ipynb
key12pat34/SQL-challenge-hw7
Bar Chart* Create a bar chart of average salary by title.
x_axis = sal_title_group_clean['title']
y_axis = sal_title_group_clean['salary']

plt.bar(x_axis, y_axis, align='center', alpha=0.75,
        color=['red', 'green', 'blue', 'black', 'orange', 'grey', 'purple'])
plt.xticks(rotation='vertical')
plt.title("Average Salary by Title")
plt.xlabel("Employee Titles")
plt.ylabel("Salaries ($)")
plt.grid(alpha=0.25)

plt.tight_layout()
plt.savefig("avg_salary_by_title.png")  # save before plt.show(); this file name is a placeholder
plt.show()
_____no_output_____
ADSL
EmployeeSQL/Working files/SQL BONUS.ipynb
key12pat34/SQL-challenge-hw7
Exercises
data = [[1,2,3],[4,5,6],[7,8,9]] list('CBA') list('ZYX') df = pd.DataFrame(data, list('zyx'), list('cba')) df df.sort_index() df.sort_index(axis = 1) df
_____no_output_____
MIT
Pandas/Dados/extras/extras/Organizando DataFrames (Sort).ipynb
lingsv/alura_ds
JWST Pipeline Validation Testing Notebook: Calwebb_detector1, reset step for MIRI **Instruments Affected**: MIRI Table of Contents [Imports](#imports_ID) [Introduction](#intro_ID) [Get Documentation String for Markdown Blocks](#markdown_from_docs) [Loading Data](#data_ID) [Run JWST Pipeline](#pipeline_ID) [Create Figure or Print Output](#residual_ID) [About This Notebook](#about_ID) ImportsList the library imports and why they are relevant to this notebook.* get_bigdata to retrieve data from artifactory* jwst.datamodels for building models for the JWST Pipeline* jwst.module.PipelineStep is the pipeline step being tested* matplotlib.pyplot (as plt) to generate plots* numpy* inspect to get the docstring of our objects.* IPython.display for printing markdown output[Top of Page](#title_ID)
from ci_watson.artifactory_helpers import get_bigdata import inspect from IPython.display import Markdown from jwst.dq_init import DQInitStep from jwst.reset import ResetStep from jwst.datamodels import RampModel import matplotlib.pyplot as plt import numpy as np
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
IntroductionFor this test we are using the reset step in the calwebb_detector1 pipeline. For MIRI exposures, the initial groups in each integration suffer from two effects related to the resetting of the detectors. The first effect is that the first few groups after a reset do not fall on the expected linear accumulation of signal. The most significant deviations occur in groups 1 and 2. This behavior is relatively uniform detector-wide. The second effect, on the other hand, is the appearance of significant extra spatial structure in these initial groups, before fading out in later groups. For more information on the pipeline step visit the links below. Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/reset/description.html Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/reset Calibration WG Requested Algorithm: A short description and link to the page: https://outerspace.stsci.edu/pages/viewpage.action?spaceKey=JWSTCC&title=Vanilla+MIR+Reset+Anomaly+Correction Defining TermsHere is where you will define terms or acronyms that may not be known to a general audience (i.e., a new employee to the institute or an external user). For example, JWST: James Webb Space Telescope; MIRI: Mid Infrared Instrument[Top of Page](#title_ID) Get Documentation String for Markdown Blocks
# Get raw python docstring raw = inspect.getdoc(ResetStep) # To convert to markdown, you need convert line breaks from \n to <br /> markdown_text = "<br />".join(raw.split("\n")) # Here you can format markdown as an output using the Markdown method. Markdown(""" # ResetStep --- {} """.format(markdown_text))
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Loading DataThe data used to test this step is a dark data file taken as part of pre-launch ground testing. The original file name is MIRV00330001001P0000000002101_1_493_SE_2017-09-07T15h14m25.fits that was renamed to jw02201001001_01101_00001_MIRIMAGE_uncal.fits with a script that updates the file to put it in pipeline-ready formatting. This is a dark data file with 40 frames and 4 integrations. [Top of Page](#title_ID)
filename = get_bigdata('jwst_validation_notebooks', 'validation_data', 'reset', 'reset_miri_test', 'jw02201001001_01101_00001_MIRIMAGE_uncal.fits')
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Run JWST PipelineTake the initial input file and run it through both dq_init and reset to get the before- and after-correction versions of the data. [Top of Page](#title_ID)
preim = DQInitStep.call(filename) postim = ResetStep.call(preim)
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Show plots and take statistics before and after correctionFor a specific pixel in the dark data: 1. Plot the ramps before and after the correction to see if the initial frame values are more in line with the rest of the ramp. 2. Fit a line to the ramps and calculate the slope and residuals. The slope should be closer to 0 and the residuals should be much smaller after the correction. 3. Plot the residuals of a single integration before and after the correction to see if they are smaller. [Top of Page](#title_ID)
# set input variables print('Shape of data cube: integrations, groups, ysize, xsize ',preim.shape) xval = 650 yval = 550 framenum = 20 # number of frames to plot (reset only corrects first few frames in cube) intsnum = 3 # number of integrations to plot (3 should show reset and not crowd) # put data into proper data models # read in images with RampModel(preim) as impre: # raises exception if file is not the correct model pass # read in image with RampModel(postim) as impost: # raises exception if file is not the correct model pass
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
The first plot should show that, after the correction, the drop at the early part of the ramp has evened out to resemble the data in the rest of the ramp.
# Plot frames vs. counts for a dark pixel before and after correction # loop through integrations for i in range(0, intsnum): # get locations of flagged pixels within the ramps ramp1 = impre.data[i, 0:framenum, yval, xval] ramp2 = impost.data[i, 0:framenum, yval, xval] # plot ramps of selected pixels plt.title('Frame values (DN) for a dark pixel') plt.xlabel('Frames') plt.ylabel('Counts (DN)') plt.plot(ramp1+i*10, label='int ' + str(i)) plt.plot(ramp2+i*10, label='int ' + str(i) + ' after reset') plt.legend(loc=4) plt.show()
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Take a single pixel in the file, before and after the correction, and fit a line to each ramp. After the correction, for a dark, the slope should be closer to zero and the residuals should be much lower.
# get array of frame numbers and choose ramps for selected pixel frames = np.arange(0, framenum) preramp = impre.data[0, 0:framenum, yval, xval] postramp = impost.data[0, 0:framenum, yval, xval] # get slopes of selected pixel before and after correction and see if it is more linear fit = np.polyfit(frames, preramp, 1, full=True) slopepre = fit[0][0] interceptpre = fit[0][1] residualspre = fit[1][0] fitpost = np.polyfit(frames, postramp, 1, full=True) slopepost = fitpost[0][0] interceptpost = fitpost[0][1] residualspost = fitpost[1][0] # look at slopes and variances print('The slope of the pixel before correction is: ', slopepre) print('The slope of the pixel after correction is: ', slopepost) print('The residuals of the pixel before correction are: ', residualspre) print('The residuals of the pixel after correction are: ', residualspost)
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Plot the residuals for the linear fit before and after correction for the specified pixel to see if the plotted ramp is flatter after the correction.
# show line plus residual for 1st int yfit = np.polyval(fit[0], frames) yfitcorr = np.polyval(fitpost[0], frames) plt.title('Residuals for ramp (single pixel) before and after reset') plt.xlabel('Frames') plt.ylabel('Residual: linear fit - data') plt.plot(frames, yfit - preramp, label='raw variance') plt.plot(frames, yfitcorr - postramp, label='corrected variance') plt.legend() plt.show()
_____no_output_____
BSD-3-Clause
jwst_validation_notebooks/reset/jwst_reset_miri_test/jwst_reset_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
Basic Optimization
def f(x): return (x-3)**2 sp.optimize.minimize(f,2) sp.optimize.minimize(f,2).x sp.optimize.minimize(f,2).fun sp.optimize.minimize?
_____no_output_____
MIT
python/matplotlib/vector/basic/scipy.ipynb
karng87/nasm_game
$$ f(x,y) = (x-1)^2 + (y-2.5)^2 $$$$ x - 2y + 2 \geq 0 \\ -x - 2y + 6 \geq 0 \\ -x + 2y + 2 \geq 0 \\ x \geq 0 \\ y \geq 0$$
def f(x,y): return (x-1)**2 + (y-2.5)**2 def g(x,y): return x - 2*y + 2 def h(x,y): return -x - 2*y + 6 def k(x,y): return -x + 2*y +2 x = np.linspace(0,5,100) x,y = np.meshgrid(x,x) z = f(x,y) g = g(x,y) h = h(x,y) k = k(x,y) fig = plt.figure() ax = fig.add_subplot(projection='3d') ax.plot_surface(x,y,z,cmap='coolwarm',alpha=0.7) ax.plot_surface(x,y,g,alpha=0.2) ax.plot_surface(x,y,h,alpha=0.2) ax.plot_surface(x,y,k,alpha=0.2) #ax.scatter3D(x,y,z, c=z,cmap='coolwarm') ############################# #### optimize.minimize ###### ############################# # constraints # ineq = inequlity # bounds l = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 cons = ({'type':'ineq','fun':lambda x: x[0] - 2*x[1] + 2}, {'type':'ineq','fun':lambda x: -x[0] - 2*x[1] + 6}, {'type':'ineq','fun':lambda x: -x[0] + 2*x[1] + 2}) bnds = ((0,None),(0,None)) res = sp.optimize.minimize(l,(2,0), bounds=bnds, constraints=cons) z3 = f(res.x[0], res.x[1]) ############################### ax.scatter3D([res.x[0]],[res.x[1]],[f(res.x[0],res.x[1])])
_____no_output_____
MIT
python/matplotlib/vector/basic/scipy.ipynb
karng87/nasm_game
interpolate
x = np.linspace(0,10,10)
y = x**2 * np.sin(x)
fig = plt.figure()
ax = fig.add_subplot()
plt.scatter(x,y)
f = sp.interpolate.interp1d(x,y,kind='linear')
f = sp.interpolate.interp1d(x,y,kind='cubic')
x_dense = np.linspace(0,10,100)
y_dense = f(x_dense)
ax.plot(x_dense,y_dense)
def f(x):
    return x**2 +5
# quad is used unqualified below, so import it explicitly
from scipy.integrate import quad
sp.integrate.quad(f,0,1)
round(quad(f,0,1)[0], 2)
quad(lambda x: x**2 + 5, 0,1)
quad(lambda x:np.exp(-x**2)*np.cos(2*np.pi*x), -np.inf, np.inf)
n = 1
quad(lambda x, n: np.exp(-n*x**2),0,np.inf,args=n)
quad(lambda x, n: np.exp(-n*x**2),0,np.inf,args=n)[0]
integrals = [quad(lambda x, n: np.exp(-n*x**2),0,np.inf,args=n)[0] for n in range(1,10)]
integrals
_____no_output_____
MIT
python/matplotlib/vector/basic/scipy.ipynb
karng87/nasm_game
{glue:text}`nteract_github_org`**Activity from {glue:}`nteract_start` to {glue:}`nteract_stop`**
from datetime import date from dateutil.relativedelta import relativedelta from myst_nb import glue import seaborn as sns import pandas as pd import numpy as np import altair as alt from markdown import markdown from IPython.display import Markdown from ipywidgets.widgets import HTML, Tab from ipywidgets import widgets from datetime import timedelta from matplotlib import pyplot as plt import os.path as op from warnings import simplefilter simplefilter('ignore') # Altair config def author_url(author): return f"https://github.com/{author}" def alt_theme(): return { 'config': { 'axisLeft': { 'labelFontSize': 15, }, 'axisBottom': { 'labelFontSize': 15, }, } } alt.themes.register('my_theme', alt_theme) alt.themes.enable("my_theme") # Define colors we'll use for GitHub membership author_types = ['MEMBER', 'CONTRIBUTOR', 'COLLABORATOR', "NONE"] author_palette = np.array(sns.palettes.blend_palette(["lightgrey", "lightgreen", "darkgreen"], 4)) * 256 author_colors = ["rgb({}, {}, {})".format(*color) for color in author_palette] author_color_dict = {key: val for key, val in zip(author_types, author_palette)} github_org = "jupyterhub" top_n_repos = 15 n_days = 10 # Parameters github_org = "nteract" n_days = 90 ############################################################ # Variables stop = date.today() start = date.today() - relativedelta(days=n_days) # Strings for use in queries start_date = f"{start:%Y-%m-%d}" stop_date = f"{stop:%Y-%m-%d}" # Glue variables for use in markdown glue(f"{github_org}_github_org", github_org, display=False) glue(f"{github_org}_start", start_date, display=False) glue(f"{github_org}_stop", stop_date, display=False)
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Load dataLoad and clean up the data
from pathlib import Path path_data = Path("../data") comments = pd.read_csv(path_data.joinpath('comments.csv'), index_col=None).drop_duplicates() issues = pd.read_csv(path_data.joinpath('issues.csv'), index_col=None).drop_duplicates() prs = pd.read_csv(path_data.joinpath('prs.csv'), index_col=None).drop_duplicates() for idata in [comments, issues, prs]: idata.query("org == @github_org", inplace=True) # What are the top N repos, we will only plot these in the full data plots top_commented_repos = comments.groupby("repo").count().sort_values("createdAt", ascending=False)['createdAt'] use_repos = top_commented_repos.head(top_n_repos).index.tolist()
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Merged Pull requestsHere's an analysis of **merged pull requests** across each of the repositories in the Jupyter ecosystem.
merged = prs.query('state == "MERGED" and closedAt > @start_date and closedAt < @stop_date') prs_by_repo = merged.groupby(['org', 'repo']).count()['author'].reset_index().sort_values(['org', 'author'], ascending=False) alt.Chart(data=prs_by_repo, title=f"Merged PRs in the last {n_days} days").mark_bar().encode( x=alt.X('repo', sort=prs_by_repo['repo'].values.tolist()), y='author', color='org' )
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Authoring and merging stats by repositoryLet's see who has been doing most of the PR authoring and merging. The PR author is generally the person that implemented a change in the repository (code, documentation, etc). The PR merger is the person that "pressed the green button" and got the change into the main codebase.
# Prep our merging DF merged_by_repo = merged.groupby(['repo', 'author'], as_index=False).agg({'id': 'count', 'authorAssociation': 'first'}).rename(columns={'id': "authored", 'author': 'username'}) closed_by_repo = merged.groupby(['repo', 'mergedBy']).count()['id'].reset_index().rename(columns={'id': "closed", "mergedBy": "username"}) charts = [] title = f"PR authors for {github_org} in the last {n_days} days" this_data = merged_by_repo.replace(np.nan, 0).groupby('username', as_index=False).agg({'authored': 'sum', 'authorAssociation': 'first'}) this_data = this_data.sort_values('authored', ascending=False) ch = alt.Chart(data=this_data, title=title).mark_bar().encode( x='username', y='authored', color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors)) ) ch charts = [] title = f"Merges for {github_org} in the last {n_days} days" ch = alt.Chart(data=closed_by_repo.replace(np.nan, 0), title=title).mark_bar().encode( x='username', y='closed', ) ch
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
IssuesIssues are **conversations** that happen on our GitHub repositories. Here's an analysis of issues across the Jupyter organizations.
created = issues.query('state == "OPEN" and createdAt > @start_date and createdAt < @stop_date') closed = issues.query('state == "CLOSED" and closedAt > @start_date and closedAt < @stop_date') created_counts = created.groupby(['org', 'repo']).count()['number'].reset_index() created_counts['org/repo'] = created_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1) sorted_vals = created_counts.sort_values(['org', 'number'], ascending=False)['repo'].values alt.Chart(data=created_counts, title=f"Issues created in the last {n_days} days").mark_bar().encode( x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())), y='number', ) closed_counts = closed.groupby(['org', 'repo']).count()['number'].reset_index() closed_counts['org/repo'] = closed_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1) sorted_vals = closed_counts.sort_values(['number'], ascending=False)['repo'].values alt.Chart(data=closed_counts, title=f"Issues closed in the last {n_days} days").mark_bar().encode( x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())), y='number', ) created_closed = pd.merge(created_counts.rename(columns={'number': 'created'}).drop(columns='org/repo'), closed_counts.rename(columns={'number': 'closed'}).drop(columns='org/repo'), on=['org', 'repo'], how='outer') created_closed = pd.melt(created_closed, id_vars=['org', 'repo'], var_name="kind", value_name="count").replace(np.nan, 0) charts = [] # Pick the top 10 repositories top_repos = created_closed.groupby(['repo']).sum().sort_values(by='count', ascending=False).head(10).index ch = alt.Chart(created_closed.query('repo in @top_repos'), width=120).mark_bar().encode( x=alt.X("kind", axis=alt.Axis(labelFontSize=15, title="")), y=alt.Y('count', axis=alt.Axis(titleFontSize=15, labelFontSize=12)), color='kind', column=alt.Column("repo", header=alt.Header(title=f"Issue activity, last {n_days} days for {github_org}", titleFontSize=15, labelFontSize=12)) ) ch # Set to datetime for kind in ['createdAt', 'closedAt']: closed.loc[:, kind] = pd.to_datetime(closed[kind]) closed.loc[:, 'time_open'] = closed['closedAt'] - closed['createdAt'] closed.loc[:, 'time_open'] = closed['time_open'].dt.total_seconds() time_open = closed.groupby(['org', 'repo']).agg({'time_open': 'median'}).reset_index() time_open['time_open'] = time_open['time_open'] / (60 * 60 * 24) time_open['org/repo'] = time_open.apply(lambda a: a['org'] + '/' + a['repo'], axis=1) sorted_vals = time_open.sort_values(['org', 'time_open'], ascending=False)['repo'].values alt.Chart(data=time_open, title=f"Time to close for issues closed in the last {n_days} days").mark_bar().encode( x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())), y=alt.Y('time_open', title="Median Days Open"), )
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Most-upvoted issues
thumbsup = issues.sort_values("thumbsup", ascending=False).head(25) thumbsup = thumbsup[["title", "url", "number", "thumbsup", "repo"]] text = [] for ii, irow in thumbsup.iterrows(): itext = f"- ({irow['thumbsup']}) {irow['title']} - {irow['repo']} - [#{irow['number']}]({irow['url']})" text.append(itext) text = '\n'.join(text) HTML(markdown(text))
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Commenters across repositoriesThese are commenters across all issues and pull requests in the last several days. These are colored by the commenter's association with the organization. For information about what these associations mean, [see this StackOverflow post](https://stackoverflow.com/a/28866914/1927102).
commentors = ( comments .query("createdAt > @start_date and createdAt < @stop_date") .groupby(['org', 'repo', 'author', 'authorAssociation']) .count().rename(columns={'id': 'count'})['count'] .reset_index() .sort_values(['org', 'count'], ascending=False) ) n_plot = 50 charts = [] for ii, (iorg, idata) in enumerate(commentors.groupby(['org'])): title = f"Top {n_plot} commentors for {iorg} in the last {n_days} days" idata = idata.groupby('author', as_index=False).agg({'count': 'sum', 'authorAssociation': 'first'}) idata = idata.sort_values('count', ascending=False).head(n_plot) ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode( x='author', y='count', color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors)) ) charts.append(ch) alt.hconcat(*charts)
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
First respondersFirst responders are the first people to respond to a new issue in one of the repositories. The following plots show first responders for recently-created issues.
first_comments = [] for (org, repo, issue_id), i_comments in comments.groupby(['org', 'repo', 'id']): ix_min = pd.to_datetime(i_comments['createdAt']).idxmin() first_comment = i_comments.loc[ix_min] if isinstance(first_comment, pd.DataFrame): first_comment = first_comment.iloc[0] first_comments.append(first_comment) first_comments = pd.concat(first_comments, axis=1).T # Make up counts for viz first_responder_counts = first_comments.groupby(['org', 'author', 'authorAssociation'], as_index=False).\ count().rename(columns={'id': 'n_first_responses'}).sort_values(['org', 'n_first_responses'], ascending=False) n_plot = 50 title = f"Top {n_plot} first responders for {github_org} in the last {n_days} days" idata = first_responder_counts.groupby('author', as_index=False).agg({'n_first_responses': 'sum', 'authorAssociation': 'first'}) idata = idata.sort_values('n_first_responses', ascending=False).head(n_plot) ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode( x='author', y='n_first_responses', color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors)) ) ch
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Recent activity A list of merged PRs by projectBelow is a tabbed readout of recently-merged PRs. Check out the title to get an idea for what they implemented, and be sure to thank the PR author for their hard work!
tabs = widgets.Tab(children=[]) for ii, ((org, repo), imerged) in enumerate(merged.query("repo in @use_repos").groupby(['org', 'repo'])): merged_by = {} pr_by = {} issue_md = [] issue_md.append(f"#### Closed PRs for repo: [{org}/{repo}](https://github.com/{github_org}/{repo})") issue_md.append("") issue_md.append(f"##### ") for _, ipr in imerged.iterrows(): user_name = ipr['author'] user_url = author_url(user_name) pr_number = ipr['number'] pr_html = ipr['url'] pr_title = ipr['title'] pr_closedby = ipr['mergedBy'] pr_closedby_url = f"https://github.com/{pr_closedby}" if user_name not in pr_by: pr_by[user_name] = 1 else: pr_by[user_name] += 1 if pr_closedby not in merged_by: merged_by[pr_closedby] = 1 else: merged_by[pr_closedby] += 1 text = f"* [(#{pr_number})]({pr_html}): _{pr_title}_ by **[@{user_name}]({user_url})** merged by **[@{pr_closedby}]({pr_closedby_url})**" issue_md.append(text) issue_md.append('') markdown_html = markdown('\n'.join(issue_md)) children = list(tabs.children) children.append(HTML(markdown_html)) tabs.children = tuple(children) tabs.set_title(ii, repo) tabs
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
A list of recent issuesBelow is a list of issues with recent activity in each repository. If they seem of interest to you, click on their links and jump in to participate!
# Add comment count data to issues and PRs comment_counts = ( comments .query("createdAt > @start_date and createdAt < @stop_date") .groupby(['org', 'repo', 'id']) .count().iloc[:, 0].to_frame() ) comment_counts.columns = ['n_comments'] comment_counts = comment_counts.reset_index() n_plot = 5 tabs = widgets.Tab(children=[]) for ii, (repo, i_issues) in enumerate(comment_counts.query("repo in @use_repos").groupby('repo')): issue_md = [] issue_md.append("") issue_md.append(f"##### [{github_org}/{repo}](https://github.com/{github_org}/{repo})") top_issues = i_issues.sort_values('n_comments', ascending=False).head(n_plot) top_issue_list = pd.merge(issues, top_issues, left_on=['org', 'repo', 'id'], right_on=['org', 'repo', 'id']) for _, issue in top_issue_list.sort_values('n_comments', ascending=False).head(n_plot).iterrows(): user_name = issue['author'] user_url = author_url(user_name) issue_number = issue['number'] issue_html = issue['url'] issue_title = issue['title'] text = f"* [(#{issue_number})]({issue_html}): _{issue_title}_ by **[@{user_name}]({user_url})**" issue_md.append(text) issue_md.append('') md_html = HTML(markdown('\n'.join(issue_md))) children = list(tabs.children) children.append(HTML(markdown('\n'.join(issue_md)))) tabs.children = tuple(children) tabs.set_title(ii, repo) display(Markdown(f"Here are the top {n_plot} active issues in each repository in the last {n_days} days")) display(tabs)
_____no_output_____
BSD-3-Clause
monthly_update/generated/book/nteract.ipynb
choldgraf/jupyter-activity-snapshot
Title: HeatMap Element; Dependencies: Matplotlib; Backends: Matplotlib, Bokeh
import numpy as np import holoviews as hv hv.extension('matplotlib')
_____no_output_____
BSD-3-Clause
examples/reference/elements/matplotlib/HeatMap.ipynb
stuarteberg/holoviews
``HeatMap`` visualises tabular data indexed by two key dimensions as a grid of colored values. This allows spotting correlations in multivariate data and provides a high-level overview of how the two variables are plotted.The data for a ``HeatMap`` may be supplied as 2D tabular data with one or more associated value dimensions. The first value dimension will be colormapped, but further value dimensions may be revealed using the hover tool.
data = [(chr(65+i), chr(97+j), i*j) for i in range(5) for j in range(5) if i!=j] hv.HeatMap(data).sort()
_____no_output_____
BSD-3-Clause
examples/reference/elements/matplotlib/HeatMap.ipynb
stuarteberg/holoviews
It is important to note that the data should be aggregated before plotting as the ``HeatMap`` cannot display multiple values for one coordinate and will simply use the first value it finds for each combination of x- and y-coordinates.
heatmap = hv.HeatMap([(0, 0, 1), (0, 0, 10), (1, 0, 2), (1, 1, 3)]) heatmap + heatmap.aggregate(function=np.max)
_____no_output_____
BSD-3-Clause
examples/reference/elements/matplotlib/HeatMap.ipynb
stuarteberg/holoviews
As the above example shows, the second value for the (0, 0) coordinate is ignored unless we aggregate the data first. To reveal the values of a ``HeatMap`` we can enable a ``colorbar``, and if you wish to have interactive hover information, you can use the hover tool in the [Bokeh backend](../bokeh/HeatMap.ipynb):
heatmap = hv.HeatMap((np.random.randint(0, 10, 100), np.random.randint(0, 10, 100), np.random.randn(100), np.random.randn(100)), vdims=['z', 'z2']).redim.range(z=(-2, 2)) heatmap.opts(colorbar=True, fig_size=250)
_____no_output_____
BSD-3-Clause
examples/reference/elements/matplotlib/HeatMap.ipynb
stuarteberg/holoviews
Decision Tree Learning - Classification Trees (using the RR Lyrae variable star dataset as an example)* [Source code](http://www.astroml.org/book_figures/chapter9/fig_rrlyrae_decisiontree.html#book-fig-chapter9-fig-rrlyrae-decisiontree)
import numpy as np
from matplotlib import pyplot as plt

from sklearn.tree import DecisionTreeClassifier
from astroML.datasets import fetch_rrlyrae_combined
from astroML.utils import split_samples
from astroML.utils import completeness_contamination

#fetch_rrlyrae_combined?
X, y = fetch_rrlyrae_combined() # combine the color information of RR Lyrae variables and standard stars
print('Features (u-g, g-r, r-i, i-z colors): ')
print(X)
print('Labels (standard stars = 0; RR Lyrae variables = 1): ')
print(y)
X = X[:, [1, 0, 2, 3]]  # rearrange columns for better 1-color results
(X_train, X_test), (y_train, y_test) = split_samples(X, y, [0.75, 0.25], random_state=0)
N_tot = len(y)
N_st = np.sum(y == 0)
N_rr = N_tot - N_st
N_train = len(y_train)
N_test = len(y_test)
N_plot = 5000 + N_rr
%matplotlib notebook
# plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0,
                    left=0.1, right=0.95, wspace=0.2)

# left plot: data and decision boundary
ax = fig.add_subplot(121)
im = ax.scatter(X[-N_plot:, 1], X[-N_plot:, 0], c=y[-N_plot:],
                s=4, lw=0, cmap=plt.cm.binary, zorder=2)
im.set_clim(-0.5, 1)
#ax.contour(xx, yy, Z, [0.5], colors='k')
# ax.set_xlim(xlim)
# ax.set_ylim(ylim)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
plt.show()
# ax.text(0.02, 0.02, "depth = %i" % depths[1],
#         transform=ax.transAxes)
_____no_output_____
MIT
notebooks/notebooks4ML/DecisionTreeClassifier_RRLyraeExample.ipynb
Astrohackers-TW/IANCUPythonMeetup
Generating benchmark data with 2 covariates p=30
import pandas as pd import toytree as tt import numpy as np import anndata as ad import os import toyplot as tp import toyplot.svg import seaborn as sns import benchmarks.scripts.tree_data_generation as tgen # tree depth d = 5 effect_sizes = [0.3, 0.5, 0.7, 0.9] # number of effects num_effects = 3 # baseline parameter scale a_abs = 2 # sampling depth N = 10000 # dispersion theta = 499 # samples per group num_samples = [10] reps = 10 # counter through all datasets id = 0 dataset_path = os.path.abspath("../../../tascCODA_data/benchmarks/2_covariates/datasets/") print(dataset_path) # Want everything to be reproducible - set a seed at every block np.random.seed(96) p = 30 id = 0 newick = tgen.generate_tree_levels(p, d) tree = tt.tree(newick) tree.draw(tip_labels_align=True, node_sizes=10, node_labels='idx') np.random.seed(76) effect_nodes, effect_leaves = tgen.get_effect_nodes( newick, num_effects=num_effects, num_leaves=p ) print(f"nodes: {effect_nodes}") print(f"leaves: {effect_leaves}") tlc = ["red" if int(i) in effect_leaves else "blue" if int(i)==p-1 else "black" for i in tree.get_node_values("idx", 1, 1)[-p:]] tlc.reverse() ref_nodes = [p.idx for p in tree.idx_dict[p-1].get_ancestors()][:-1] ref_nodes.append(p-1) canvas = tp.Canvas(width=800, height=1600) ax0 = canvas.cartesian(bounds=(0, 700, 0, 1600), padding=0) tree.draw( # tip_labels=False, node_sizes=[20 for i in tree.get_node_values("name", 1, 1)], node_labels=[x for x in tree.get_node_values("idx", 1, 1)], node_colors=["lightcoral" if i in effect_nodes else "lightblue" if i in ref_nodes else "lightgrey" for i in tree.get_node_values("idx", 1, 1)], node_labels_style={"font-size": 10}, width=700, height=1600, node_style={"stroke": "black"}, axes=ax0, tip_labels="name", tip_labels_colors=tlc, ) # tp.svg.render(canvas, "./plots/benchmark_tree_30.svg") id = 0 x1_nodes = [39] x1_leaves = np.arange(13, 24, 1) beta_1 = np.zeros(p) beta_1[x1_leaves] = 3 np.random.seed(1234) for e in effect_sizes: for n in num_samples: for r in range(reps): mu_0, mu_1 = tgen.generate_mu( a_abs=a_abs, num_leaves=p, effect_nodes=effect_nodes, effect_leaves=effect_leaves, effect_size=e, newick=newick ) X = pd.DataFrame({"x_0": np.repeat([0,1], n), "x_1": np.random.uniform(0, 1, 2*n)}) Y = np.zeros((n*2, p)) for i in range(n): #Y[i, :] = np.sum(mu_0) * (mu_0 + beta_1*X.loc[i+n, "x_1"])/np.sum(mu_0 + beta_1*X.loc[i+n, "x_1"]) #Y[i+n, :] = np.sum(mu_1) * (mu_1 + beta_1*X.loc[i+n, "x_1"])/np.sum(mu_1 + beta_1*X.loc[i+n, "x_1"]) Y[i, :] = np.exp(np.log(mu_0) + beta_1*X.loc[i, "x_1"]) Y[i+n, :] = np.exp(np.log(mu_1) + beta_1*X.loc[i+n, "x_1"]) X = X.astype(np.float64) Y = Y.astype(np.float64) test_data = ad.AnnData( X=Y, obs=X, uns={ "tree_newick": newick, "effect_nodes": effect_nodes, "effect_leaves": effect_leaves, "effect_size": e, "num_samples": n, } ) # test_data.write_h5ad(dataset_path + f"/data_{id}") id += 1
/Users/johannes.ostner/opt/anaconda3/envs/scCODA_3/lib/python3.8/site-packages/anndata/_core/anndata.py:120: ImplicitModificationWarning: Transforming to str index. warnings.warn("Transforming to str index.", ImplicitModificationWarning)
BSD-3-Clause
benchmarks/2_covariates/generate_data_2_covariates.ipynb
bio-datascience/tascCODA_reproducibility
GANs
%matplotlib inline from fastai.gen_doc.nbdoc import * from fastai import * from fastai.vision import * from fastai.vision.gan import *
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
GAN stands for [Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf), and they were invented by Ian Goodfellow. The concept is that we will train two models at the same time: a generator and a critic. The generator will try to make new images similar to the ones in our dataset, and the critic's job will be to classify real images from the fake ones the generator produces. The generator returns images, the discriminator a feature map (it can be a single number depending on the input size). Usually the discriminator will be trained to return 0. everywhere for fake images and 1. everywhere for real ones. This module contains all the necessary functions to create a GAN. We train the two models against each other in the sense that at each step (more or less), we: 1. Freeze the generator and train the discriminator for one step by: - getting one batch of true images (let's call that `real`) - generating one batch of fake images (let's call that `fake`) - have the discriminator evaluate each batch and compute a loss function from that; the important part is that it rewards positively the detection of real images and penalizes the fake ones - update the weights of the discriminator with the gradients of this loss 2. Freeze the discriminator and train the generator for one step by: - generating one batch of fake images - evaluate the discriminator on it - return a loss that rewards positively the discriminator thinking those are real images; the important part is that the generator is rewarded for fooling the discriminator - update the weights of the generator with the gradients of this loss
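To make the alternation concrete, here is a schematic, hedged sketch of one training epoch written as plain PyTorch. It is not the fastai implementation (the `GANTrainer` callback below handles all of this for you), and every argument name is a placeholder.

```python
import torch

def gan_training_epoch(dataloader, generator, critic, gen_opt, crit_opt,
                       gen_loss_func, crit_loss_func, noise_sz=100):
    "Schematic illustration only: one pass over the data, alternating critic and generator steps."
    for real, _ in dataloader:
        noise = torch.randn(real.size(0), noise_sz, 1, 1, device=real.device)

        # 1. critic step: fakes are detached so no gradient reaches the generator
        fake = generator(noise).detach()
        crit_opt.zero_grad()
        crit_loss_func(critic(real), critic(fake)).backward()
        crit_opt.step()

        # 2. generator step: the critic only scores the fakes; its weights are not stepped here
        gen_opt.zero_grad()
        fake = generator(noise)
        gen_loss_func(critic(fake), real, fake).backward()
        gen_opt.step()
```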
show_doc(GANLearner)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
This is the general constructor to create a GAN; you might want to use one of the factory methods that are easier to use. Create a GAN from [`data`](/vision.data.html#vision.data), a `generator` and a `critic`. The [`data`](/vision.data.html#vision.data) should have the inputs the `generator` will expect and the images wanted as targets. `gen_loss_func` is the loss function that will be applied to the `generator`. It takes three arguments `fake_pred`, `target`, `output` and should return a rank 0 tensor. `output` is the result of the `generator` applied to the input (the xs of the batch), `target` is the ys of the batch and `fake_pred` is the result of the `discriminator` being given `output`. `output` and `target` can be used to add a specific loss to the GAN loss (pixel loss, feature loss) and, for a good training of the GAN, the loss should encourage `fake_pred` to be as close to 1 as possible (the `generator` is trained to fool the `critic`). `crit_loss_func` is the loss function that will be applied to the `critic`. It takes two arguments `real_pred` and `fake_pred`. `real_pred` is the result of the `critic` on the target images (the ys of the batch) and `fake_pred` is the result of the `critic` applied to a batch of fakes, generated by the `generator` from the xs of the batch. `switcher` is a [`Callback`](/callback.html#Callback) that should tell the GAN when to switch from critic to generator and vice versa. By default it does 5 iterations of the critic for 1 iteration of the generator. The model begins the training with the `generator` if `gen_first=True`. If `switch_eval=True`, the model that isn't trained is switched to eval mode (left in training mode otherwise, which means some statistics like the running mean in batchnorm layers are updated, or the dropouts are applied). `clip` should be set to a certain value if one wants to clip the weights (see the [Wasserstein GAN](https://arxiv.org/pdf/1701.07875.pdf) for instance). If `show_img=True`, one image generated by the GAN is shown at the end of each epoch.
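As a hedged illustration of these signatures (not the library's built-in losses), one could define simple binary cross-entropy losses and pass them to the constructor; `data`, `generator`, and `critic` are assumed to exist already, and the critic is assumed to return raw logits.

```python
import torch
import torch.nn.functional as F

def gen_loss_func(fake_pred, target, output):
    # reward the generator when the critic scores its images close to 1
    return F.binary_cross_entropy_with_logits(fake_pred, torch.ones_like(fake_pred))

def crit_loss_func(real_pred, fake_pred):
    # reward the critic for scoring real images as 1 and fake images as 0
    return (F.binary_cross_entropy_with_logits(real_pred, torch.ones_like(real_pred))
            + F.binary_cross_entropy_with_logits(fake_pred, torch.zeros_like(fake_pred)))

learn = GANLearner(data, generator, critic, gen_loss_func, crit_loss_func)
```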
show_doc(GANLearner.from_learners)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Directly creates a [`GANLearner`](/vision.gan.html#GANLearner) from two [`Learner`](/basic_train.html#Learner)s: one for the `generator` and one for the `critic`. The `switcher` and all `kwargs` will be passed to the initialization of [`GANLearner`](/vision.gan.html#GANLearner) along with the following loss functions:- `loss_func_crit` is the mean of `learn_crit.loss_func` applied to `real_pred` with a target of ones and `learn_crit.loss_func` applied to `fake_pred` with a target of zeros- `loss_func_gen` is the mean of `learn_crit.loss_func` applied to `fake_pred` with a target of ones (to fool the discriminator) and `learn_gen.loss_func` applied to `output` and `target`. The weights of each of those contributions can be passed in `weights_gen` (default is 1. and 1.)
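A hedged sketch, assuming `learn_gen` and `learn_crit` are pre-trained `Learner`s for the generator and the critic; the weights shown are illustrative, not defaults.

```python
learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1., 50.))
learn.fit(5, 1e-4)
```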
show_doc(GANLearner.wgan)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
The Wasserstein GAN is detailed in [this article](https://arxiv.org/pdf/1701.07875.pdf). `switcher` and the `kwargs` will be passed to the [`GANLearner`](/vision.gan.html#GANLearner) init, and `clip` is the weight clipping. Switchers In any GAN training, you will need to tell the [`Learner`](/basic_train.html#Learner) when to switch from generator to critic and vice versa. The two following [`Callback`](/callback.html#Callback)s are examples to help you with that. As usual, don't call the `on_something` methods directly, the fastai library will do it for you during training.
show_doc(FixedGANSwitcher, title_level=3) show_doc(FixedGANSwitcher.on_train_begin) show_doc(FixedGANSwitcher.on_batch_end) show_doc(AdaptiveGANSwitcher, title_level=3) show_doc(AdaptiveGANSwitcher.on_batch_end)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Discriminative LR If you want to train your critic at a different learning rate than the generator, this will let you do it automatically (even if you have a learning rate schedule).
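A hedged example of attaching it when creating a learner so that the critic trains at five times the generator's learning rate; `data`, `generator`, and `critic` are assumed to exist already, and the multiplier is just an illustration.

```python
from functools import partial

learn = GANLearner.wgan(data, generator, critic,
                        callback_fns=[partial(GANDiscriminativeLR, mult_lr=5.)])
learn.fit(10, 1e-4)
```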
show_doc(GANDiscriminativeLR, title_level=3) show_doc(GANDiscriminativeLR.on_batch_begin) show_doc(GANDiscriminativeLR.on_step_end)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
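For instance, a hypothetical snippet, assuming a `GANLearner` named `learn` exists and that the multiplier argument is called `mult_lr` (check the show_doc output above for the exact signature in your fastai version):

from functools import partial
from fastai.vision.gan import GANDiscriminativeLR

# Train the critic with a learning rate 5 times larger than the generator's.
# learn.fit(10, 1e-4, callbacks=[GANDiscriminativeLR(learn, mult_lr=5.)])
# Or register it as a callback function so it is recreated for every fit call:
# learn.callback_fns.append(partial(GANDiscriminativeLR, mult_lr=5.))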
Specific models
show_doc(basic_critic)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
This model starts with a 4 by 4 convolutional layer of stride 2 from `n_channels` to `n_features`, followed by `n_extra_layers` 3 by 3 convolutional layers of stride 1. Then we stack as many 4 by 4 convolutional layers of stride 2, with the number of features multiplied by 2 at each stage, as are needed for the `in_size` to become 1. `kwargs` can be used to customize the convolutional layers and are passed to [`conv_layer`](/layers.html#conv_layer).
show_doc(basic_generator)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
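For example, a critic for 64 by 64 RGB images might be created like this (a sketch; the extra layer count is arbitrary and the keyword names are assumed to match the show_doc signature above):

from fastai.vision.gan import basic_critic

# 64x64 input, 3 channels, 64 base features, one extra 3x3 layer.
critic = basic_critic(in_size=64, n_channels=3, n_features=64, n_extra_layers=1)
print(critic)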
This model starts with a 4 by 4 transposed convolutional layer of stride 1 from `noise_sz` to the last number of features of the corresponding critic. Then we stack as many 4 by 4 transposed convolutional layers of stride 2, with the number of features divided by 2 at each stage, so that the image ends up with height and width `in_size//2`. At the end, we add `n_extra_layers` 3 by 3 convolutional layers of stride 1. The last layer is a transposed convolution of size 4 by 4 and stride 2, followed by `tanh`. `kwargs` can be used to customize the convolutional layers and are passed to [`conv_layer`](/layers.html#conv_layer).
show_doc(gan_critic)
show_doc(GANTrainer)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
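A matching generator for the critic above could be sketched as follows; the argument values are hypothetical, and the expected noise shape is an assumption based on how `NoisyItem` produces its data:

import torch
from fastai.vision.gan import basic_generator

generator = basic_generator(in_size=64, n_channels=3, noise_sz=100,
                            n_features=64, n_extra_layers=1)
# NoisyItem produces noise shaped (noise_sz, 1, 1), so a batch is (bs, noise_sz, 1, 1).
fake = generator(torch.randn(16, 100, 1, 1))
print(fake.shape)  # expected: torch.Size([16, 3, 64, 64])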
[`LearnerCallback`](/basic_train.html#LearnerCallback) that is responsible for handling the two different optimizers (one for the generator and one for the critic), and for doing all the work behind the scenes so that the generator (or the critic) is in training mode, with parameters requiring gradients, each time we switch. `switch_eval=True` means that the [`GANTrainer`](/vision.gan.html#GANTrainer) will put the model that isn't training into eval mode (if it's `False`, its running statistics, like those in batchnorm layers, will be updated and dropout will be applied). `clip` is the clipping applied to the weights (if not `None`). `beta` is the coefficient for the moving averages, as the [`GANTrainer`](/vision.gan.html#GANTrainer) tracks the generator loss and the critic loss separately. `gen_first=True` means the training begins with the generator (with the critic if it's `False`). If `show_img=True`, we show a generated image at the end of each epoch.
show_doc(GANTrainer.switch)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
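The smoothed loss tracking mentioned above boils down to an exponential moving average with coefficient `beta`; a standalone sketch of the idea (not the library's internal class) is:

class SmoothedLoss:
    "Exponential moving average of a stream of loss values, with bias correction."
    def __init__(self, beta=0.98):
        self.beta, self.n, self.mov_avg, self.smooth = beta, 0, 0., 0.
    def add(self, val):
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)

# One tracker per model, as the GANTrainer keeps the two losses separate.
gen_loss, crit_loss = SmoothedLoss(beta=0.98), SmoothedLoss(beta=0.98)
gen_loss.add(1.3); gen_loss.add(1.1)
print(gen_loss.smooth)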
If `gen_mode` is left as `None`, the model is simply switched to the other mode (critic if it was in generator mode and vice versa).
show_doc(GANTrainer.on_train_begin)
show_doc(GANTrainer.on_epoch_begin)
show_doc(GANTrainer.on_batch_begin)
show_doc(GANTrainer.on_backward_begin)
show_doc(GANTrainer.on_epoch_end)
show_doc(GANTrainer.on_train_end)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Specific modules
show_doc(GANModule, title_level=3)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
If `gen_mode` is left as `None`, the model is simply switched to the other mode (critic if it was in generator mode and vice versa).
show_doc(GANModule.switch)
show_doc(GANLoss, title_level=3)
show_doc(AdaptiveLoss, title_level=3)
show_doc(accuracy_thresh_expand)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Data Block API
show_doc(NoisyItem, title_level=3)
show_doc(GANItemList, title_level=3)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Inputs will be [`NoisyItem`](/vision.gan.html#NoisyItem)s of size `noise_sz`, while the default class for the targets is [`ImageItemList`](/vision.data.html#ImageItemList).
show_doc(GANItemList.show_xys)
show_doc(GANItemList.show_xyzs)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
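A hypothetical data block pipeline using these classes could look like the sketch below; it is kept commented out since the image folder `path`, the transforms and the exact method names can vary across fastai v1 versions.

from fastai.vision import *
from fastai.vision.gan import GANItemList

# Noise vectors of size 100 as inputs, images found under `path` as targets.
# data = (GANItemList.from_folder(path, noise_sz=100)
#         .split_none()
#         .label_from_func(noop)
#         .transform(get_transforms(), size=64, tfm_y=True)
#         .databunch(bs=128))
# data.show_batch(rows=4)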
Undocumented Methods - Methods moved below this line will intentionally be hidden
show_doc(GANLoss.critic)
show_doc(GANModule.forward)
show_doc(GANLoss.generator)
show_doc(NoisyItem.apply_tfms)
show_doc(AdaptiveLoss.forward)
show_doc(GANItemList.get)
show_doc(GANItemList.reconstruct)
_____no_output_____
Apache-2.0
docs_src/vision.gan.ipynb
navjotts/fastai
Parser libraries
BeautifulSoup(markup, "html.parser")  # Python's built-in HTML parser, no extra install needed
BeautifulSoup(markup, "lxml")         # fast HTML parser, requires the lxml package
BeautifulSoup(markup, "xml")          # lxml's XML parser, for XML documents
BeautifulSoup(markup, "html5lib")     # parses the way a browser does, requires html5lib
_____no_output_____
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
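The choice of parser matters mostly for broken markup; a small comparison is shown below (lxml and html5lib have to be installed separately, e.g. pip install lxml html5lib).

from bs4 import BeautifulSoup

broken = "<p>First<b>bold"  # unclosed tags on purpose
print(BeautifulSoup(broken, "html.parser").prettify())
print(BeautifulSoup(broken, "html5lib").prettify())  # adds <html>/<head>/<body> like a browser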
Basic usage
# Use requests to fetch the HTML document for BeautifulSoup to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding explicitly to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
print(soup.prettify())  # pretty-print the parsed HTML
print(soup.title.string)
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> <title> NTU Mail-臺灣大學電子郵件系統 </title> <link href="images/style.css" rel="stylesheet" type="text/css"/> </head> <body> <div id="top"> | <a href="http://www.ntu.edu.tw/"> 臺大首頁 NTU Home </a> | <a href="http://www.cc.ntu.edu.tw/"> 計中首頁 </a> | </div> <div id="wrapper"> <div id="banner"> </div> <div id="mail"> <div id="imgcss"> <img src="images/mail20.png"/> </div> <div id="content"> <h1> <a href="https://mail.ntu.edu.tw/"> NTU Mail 2.0 </a> </h1> <ul> <li> <img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li> 教職員帳號 \ Faculty Account </li> <li> 公務、計畫、及短期帳號 \ Project and Short Term Account </li> <li> 所有在學學生帳號 \ Internal Student Account </li> </ol> </li> <li> <img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/"> Mail 2.0 </a> </li> <li> <img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/"> Mail 2.0 FAQ </a> </li> </ul> </div> <!--content end--> </div> <!--mail end--> <div id="webmail"> <div id="imgcss"> <img src="images/webmail.png"/> </div> <div id="content"> <h1> <a href="http://webmail.ntu.edu.tw/"> NTU Mail 1.0 (Webmail 1.0) </a> </h1> <ul> <li> <img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li> 校友帳號 \ Alumni Account </li> <li> 醫院員工帳號 \ Hospital Staff Account </li> </ol> </li> <li> <img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/"> Webmail 1.0 </a> </li> <li> <img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/"> Webmail FAQ </a> </li> </ul> </div> <!--content end--> </div> <!--webmail end--> </div> <!--wrapper end--> <div id="footer"> Copyright 臺灣大學 National Taiwan University <br/> 諮詢服務電話:(02)3366-5022或3366-5023 <br/> 諮詢服務信箱:[email protected] </div> </body> </html> NTU Mail-臺灣大學電子郵件系統
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
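Once the page is parsed, the soup object can be searched. Continuing from the cell above (assuming `soup` is still in scope):

# List every link on the page with its target and visible text.
for a in soup.find_all('a'):
    print(a.get('href'), a.get_text(strip=True))

# CSS selectors are also supported.
footer = soup.select_one('#footer')
if footer is not None:
    print(footer.get_text(" ", strip=True))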