Searching for bouts in a day of ephys recording. The microphone wav file is first exported in sglx_pipe-dev-sort-bouts-s_b1253_21-20210614; bouts are then extracted in searchbout_s_b1253_21-ephys
import os import glob import socket import logging import pickle import numpy as np import pandas as pd from scipy.io import wavfile from scipy import signal ### Using plotly instead of matplotlib from plotly.subplots import make_subplots import plotly.graph_objects as go from importlib import reload logger = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) logger.info('Running on {}'.format(socket.gethostname())) from ceciestunepipe.file import bcistructure as et from ceciestunepipe.util.sound import boutsearch as bs
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
Get the file locations for a session (day) of recordings
reload(et) sess_par = {'bird': 's_b1253_21', 'sess': '2021-07-18', 'sort': 2} exp_struct = et.get_exp_struct(sess_par['bird'], sess_par['sess'], ephys_software='sglx') raw_folder = exp_struct['folders']['sglx'] derived_folder = exp_struct['folders']['derived'] bouts_folder = os.path.join(derived_folder, 'bouts_ceciestunepipe') sess_bouts_file = os.path.join(bouts_folder, 'bout_sess_auto.pickle') sess_bouts_curated_file = os.path.join(bouts_folder, 'bout_curated.pickle') os.makedirs(bouts_folder, exist_ok=True) exp_struct['folders']
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
load and concatenate the files of the session
def read_session_auto_bouts(exp_struct): # list all files of the session # read into list of pandas dataframes and concatenate # read the search parameters of the first session # return the big pd and the search params derived_folder = exp_struct['folders']['derived'] bout_pd_files = et.get_sgl_files_epochs(derived_folder, file_filter='bout_auto.pickle') search_params_files = et.get_sgl_files_epochs(derived_folder, file_filter='bout_search_params.pickle') print(bout_pd_files) hparams=None with open(search_params_files[0], 'rb') as fh: hparams = pickle.load(fh) bout_pd = pd.concat([pd.read_pickle(p) for p in bout_pd_files[:]]) return bout_pd, hparams bout_pd, hparams = read_session_auto_bouts(exp_struct) bout_pd['file'].values
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
if the sampling rate wasn't saved (which is a bad mistake), read it from the first wav file in the session
def sample_rate_from_wav(wav_path): sample_rate, x = wavfile.read(wav_path) return sample_rate if hparams['sample_rate'] is None: one_wav_path = bout_pd.loc[0, 'file'] logger.info('Sample rate not saved in parameters dict, searching it in ' + one_wav_path) hparams['sample_rate'] = sample_rate_from_wav(one_wav_path) def cleanup(bout_pd: pd.DataFrame): ## check for empty waveforms (how would THAT happen???) bout_pd['valid_waveform'] = bout_pd['waveform'].apply(lambda x: (False if x.size==0 else True)) # valid is & of all the validated criteria bout_pd['valid'] = bout_pd['valid_waveform'] ## fill in the epoch bout_pd['epoch'] = bout_pd['file'].apply(lambda x: et.split_path(x)[-2]) # drop not valid and reset index bout_pd.drop(bout_pd[bout_pd['valid']==False].index, inplace=True) bout_pd.reset_index(drop=True, inplace=True) # set all to 'confusing' (unchecked) and 'bout_check' false (not a bout) bout_pd['confusing'] = True bout_pd['bout_check'] = False cleanup(bout_pd) bout_pd reload(et)
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
compute the spectrograms
bout_pd['spectrogram'] = bout_pd['waveform'].apply(lambda x: bs.gimmepower(x, hparams)[2]) logger.info('saving bout pandas with spectrogram to ' + sess_bouts_file) bout_pd.to_pickle(sess_bouts_file) bout_pd.head(2) bout_pd['file'][0]
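For reference, here is a minimal sketch of what a spectrogram helper along the lines of `bs.gimmepower` might look like, built on `scipy.signal.spectrogram`. The actual ceciestunepipe implementation may differ, and the `hparams` keys `'n_window'` and `'step_ms'` used below are assumptions (only `'sample_rate'` appears in this notebook).

```python
from scipy import signal

def gimmepower_sketch(x, hparams):
    # Return (f, t, sxx); the notebook keeps only element [2], the power spectrogram.
    s_f = hparams['sample_rate']
    nperseg = hparams.get('n_window', 512)                       # assumed key and default
    step = max(1, int(hparams.get('step_ms', 1) * s_f / 1000))   # assumed key and default
    f, t, sxx = signal.spectrogram(x, fs=s_f, nperseg=nperseg,
                                   noverlap=nperseg - step, scaling='spectrum')
    return f, t, sxx
```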
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
inspect the bouts and curate them; visualize one bout
bout_pd.iloc[0] import plotly.express as px import plotly.graph_objects as go from ipywidgets import widgets def viz_one_bout(df: pd.Series, sub_sample=1): # get the power and the spectrogram sxx = df['spectrogram'][:, ::sub_sample] x = df['waveform'][::sub_sample] # the trace tr_waveform = go.Scatter(y=x) figwidg_waveform = go.FigureWidget(data=[tr_waveform], layout= {'height': 300,'width':1000}) # the spectrogram fig_spectrogram = px.imshow(sxx, labels={}, color_continuous_scale='Inferno', aspect='auto') fig_spectrogram.update_layout(width=1000, height=300, coloraxis_showscale=False) fig_spectrogram.update_xaxes(showticklabels=False) fig_spectrogram.update_yaxes(showticklabels=False) figwidg_spectrogram = go.FigureWidget(fig_spectrogram) display(widgets.VBox([figwidg_waveform, figwidg_spectrogram])) viz_one_bout(bout_pd.iloc[24]) bout_pd.head(2)
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
use it in a widget; add a 'confusing' label (not sure / mixed). We want to avoid having things we are not sure of in the training dataset
bout_pd.reset_index(drop=True, inplace=True) ## Set confusing by default, will only be False once asserted bout/or not bout_pd['confusing'] = True bout_pd['bout_check'] = False ### Create a counter object (count goes 1:1 to DataFrame index) from traitlets import CInt, link class Counter(widgets.DOMWidget): value = CInt(0) value.tag(sync=True) class VizBout(): def __init__(self, hparams, bouts_pd): self.bout = None self.bouts_pd = bouts_pd self.bout_series = None self.is_bout = None self.is_confusing = None self.bout_counter = None self.bout_id = None self.buttons = {} self.m_pick = None self.fig_waveform = None self.fig_spectrogram = None self.figwidg_waveform = None self.figwidg_spectrogram = None self.fig_width = 2 self.sub_sample = 10 self.x = None self.sxx = None self.tr_waveform = None self.s_f = hparams['sample_rate'] self.init_fig() self.init_widget() self.show() def init_fig(self): # the trace self.tr_waveform = go.Scatter(y=np.zeros(500)) self.figwidg_waveform = go.FigureWidget(data=[self.tr_waveform], layout={'width': 1000, 'height':300}) # the spectrogram self.fig_spectrogram = px.imshow(np.random.rand(500, 500), labels={}, color_continuous_scale='Inferno', aspect='auto') self.fig_spectrogram.update_layout(width=1000, height=300, coloraxis_showscale=False) self.fig_spectrogram.update_xaxes(showticklabels=False) self.fig_spectrogram.update_yaxes(showticklabels=False) self.figwidg_spectrogram = go.FigureWidget(self.fig_spectrogram) def init_widget(self): # declare elements # lay them out # self.bout_counter = Counter() self.is_bout = widgets.Checkbox(description='is bout') self.is_confusing = widgets.Checkbox(description='Not sure or mixed') self.buttons['Next'] = widgets.Button(description="Next", button_style='info', icon='plus') self.buttons['Prev'] = widgets.Button(description="Prev", button_style='warning', icon='minus') self.buttons['Check'] = widgets.Button(description="Check", button_style='success', icon='check') self.buttons['Uncheck'] = widgets.Button(description="Uncheck", button_style='danger', icon='wrong') [b.on_click(self.button_click) for b in self.buttons.values()] left_box = widgets.VBox([self.buttons['Prev'], self.buttons['Uncheck']]) right_box = widgets.VBox([self.buttons['Next'], self.buttons['Check']]) button_box = widgets.HBox([left_box, right_box]) self.m_pick = widgets.IntSlider(value=0, min=0, max=self.bouts_pd.index.size-1,step=1, description="Bout candidate index") control_box = widgets.HBox([button_box, widgets.VBox([self.is_bout, self.is_confusing]), self.m_pick]) link((self.m_pick, 'value'), (self.bout_counter, 'value')) self.update_bout() self.is_bout.observe(self.bout_checked, names='value') self.is_confusing.observe(self.confusing_checked, names='value') self.m_pick.observe(self.slider_change, names='value') all_containers = widgets.VBox([control_box, self.figwidg_waveform, self.figwidg_spectrogram]) display(all_containers) # display(button_box) # display(self.m_pick) # display(self.is_bout) # display(self.fig) def button_click(self, button): self.bout_id = self.bout_counter.value curr_bout = self.bout_counter if button.description == 'Next': curr_bout.value += 1 elif button.description == 'Prev': curr_bout.value -= 1 elif button.description == 'Check': self.bouts_pd.loc[self.bout_id, 'bout_check'] = True self.bouts_pd.loc[self.bout_id, 'confusing'] = False curr_bout.value += 1 elif button.description == 'Uncheck': self.bouts_pd.loc[self.bout_id, 'bout_check'] = False self.bouts_pd.loc[self.bout_id, 'confusing'] = False curr_bout.value += 1 # 
handle the edges of the counter if curr_bout.value > self.m_pick.max: curr_bout.value = 0 if curr_bout.value < self.m_pick.min: curr_bout.value = self.m_pick.max def slider_change(self, change): #logger.info('slider changed') #self.bout_counter = change.new #clear_output(True) self.update_bout() self.show() def bout_checked(self, bc): # print "bout checked" # print bc['new'] # print self.motiff self.bouts_pd.loc[self.bout_id, 'bout_check'] = bc['new'] def confusing_checked(self, bc): # print "bout checked" # print bc['new'] # print self.motiff self.bouts_pd.loc[self.bout_id, 'confusing'] = bc['new'] def update_bout(self): self.bout_id = self.bout_counter.value self.bout_series = self.bouts_pd.iloc[self.bout_id] self.is_bout.value = bool(self.bout_series['bout_check']) self.is_confusing.value = bool(self.bout_series['confusing']) self.x = self.bout_series['waveform'][::self.sub_sample] self.sxx = self.bout_series['spectrogram'][::self.sub_sample] def show(self): #self.fig.clf() #self.init_fig() # update # self.update_bout() #plot #logger.info('showing') # Show the figures with self.figwidg_waveform.batch_update(): self.figwidg_waveform.data[0].y = self.x self.figwidg_waveform.data[0].x = np.arange(self.x.size) * self.sub_sample / self.s_f with self.figwidg_spectrogram.batch_update(): self.figwidg_spectrogram.data[0].z = np.sqrt(self.sxx[::-1]) viz_bout = VizBout(hparams, bout_pd) np.where(viz_bout.bouts_pd['bout_check']==True)[0].size
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
save it
hparams ### get the curated file path ##save to the curated file path viz_bout.bouts_pd.to_pickle(sess_bouts_curated_file) logger.info('saved curated bout pandas to pickle {}'.format(sess_bouts_curated_file)) viz_bout.bouts_pd['file'][0] viz_bout.bouts_pd.head(5)
_____no_output_____
MIT
notebooks/curate_bouts-s_b1253_21-plotly-ephys.ipynb
zekearneodo/ceciestunepipe
Simple CNN on dataset
import numpy as np import pandas as pd import keras import matplotlib.pyplot as plt import os os.environ['CUDA_VISIBLE_DEVICES'] = '1' from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential, Model from keras.layers import Conv2D, MaxPooling2D from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization, GlobalAveragePooling2D from keras import backend as K from keras.utils import multi_gpu_model # dimensions of our images. img_width, img_height = 224, 224 train_data_dir = '../dataset/train' validation_data_dir = '../dataset/test' nb_train_samples = 194 nb_validation_samples = 49 epochs = 50 batch_size = 4 if K.image_data_format() == 'channels_first': input_shape = (3, img_width, img_height) else: input_shape = (img_width, img_height, 3) from keras.applications.resnet import ResNet50 import numpy as np base_model = ResNet50(weights='imagenet', input_shape=(img_width, img_height, 3), include_top = False, pooling='max') #adam = keras.optimizers.Adam(lr=1e-4) #model.compile(loss='binary_crossentropy', # optimizer=adam, # metrics=['accuracy']) x = base_model.output # x = Flatten()(x) x = Dense(512, activation='relu')(x) x = BatchNormalization()(x) predictions = Dense(1, activation='sigmoid')(x) # this is the model we will train model = Model(inputs=base_model.input, outputs=predictions) adam = keras.optimizers.Adam(lr=1e-4) model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) model.summary() # this is the augmentation configuration we will use for training train_datagen = ImageDataGenerator( rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, samplewise_center=True, samplewise_std_normalization=True, brightness_range=(0.1, 0.9)) # this is the augmentation configuration we will use for testing: # only rescaling test_datagen = ImageDataGenerator(rescale=1. 
/ 255, samplewise_center=True, samplewise_std_normalization=True,) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary') validation_generator = test_datagen.flow_from_directory( validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary') history = model.fit_generator( train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size) history.history.keys() # summarize history for accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img datagen = ImageDataGenerator( rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest', samplewise_center=True, samplewise_std_normalization=True, brightness_range=(0.1, 0.9)) img = load_img('../datasets/train/N/1_30_1_231.jpg') # this is a PIL image x = img_to_array(img) # this is a Numpy array with shape (3, 150, 150) x = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 3, 150, 150) # the .flow() command below generates batches of randomly transformed images # and saves the results to the `preview/` directory i = 0 for batch in datagen.flow(x, batch_size=1, save_to_dir='preview', save_prefix='cat', save_format='jpeg'): i += 1 if i > 20: break # otherwise the generator would loop indefinitely x_train = [] for i in range(25): x_batch, y_batch = next(train_generator) x_train.append(x_batch) x_test = [] for i in range(2): x_batch, y_batch = next(validation_generator) x_test.append(x_batch) img = load_img('../dataset/test/N/1_30_2_382.JPG') # this is a PIL image plt.imshow(img) imgNames = os.listdir('../dataset/test/N') for img in imgNames: print("HERE") imgname = '../dataset/test/N/' + img; myimg = load_img(imgname) # this is a PIL image plt.imshow(myimg) plt.show() x_train = np.concatenate(x_train) x_train.shape x_test = np.concatenate(x_test) x_test.shape import shap import numpy as np # select a set of background examples to take an expectation over background = x_train[np.random.choice(x_train.shape[0], 10, replace=True)] # explain predictions of the model on four images e = shap.DeepExplainer(model, background) # ...or pass tensors directly # e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background) shap_values = e.shap_values(x_test[1:2]) # plot the feature attributions shap.image_plot(shap_values, -x_test[1:2]) x_test[1]
_____no_output_____
MIT
SimpleCNN-ShreyaRESNET50.ipynb
Daniel-Wu/HydraML
Leaflet cluster map of talk locations. Run this from the _talks/ directory, which contains .md files of all your talks. This scrapes the location YAML field from each .md file, geolocates it with geopy/Nominatim, and uses the getorg library to output data, HTML, and Javascript for a standalone cluster map.
!pip install getorg --upgrade import glob import getorg from geopy import Nominatim g = glob.glob("*.md") geocoder = Nominatim() location_dict = {} location = "" permalink = "" title = "" for file in g: with open(file, 'r') as f: lines = f.read() if lines.find('location: "') > 1: loc_start = lines.find('location: "') + 11 lines_trim = lines[loc_start:] loc_end = lines_trim.find('"') location = lines_trim[:loc_end] location_dict[location] = geocoder.geocode(location) print(location, "\n", location_dict[location]) m = getorg.orgmap.create_map_obj() getorg.orgmap.output_html_cluster_map(location_dict, folder_name="../talkmap", hashed_usernames=False)
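An aside, not part of the original notebook: recent geopy releases require a `user_agent` when constructing `Nominatim`, and the Nominatim usage policy asks for throttled requests, so a more robust setup might look like the sketch below (the `user_agent` string is arbitrary).

```python
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter

geocoder = Nominatim(user_agent="talkmap")                    # user_agent is required in geopy >= 2
geocode = RateLimiter(geocoder.geocode, min_delay_seconds=1)  # throttle requests to the public server

print(geocode("Boston, MA"))  # drop-in replacement for geocoder.geocode(...)
```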
_____no_output_____
MIT
.ipynb_checkpoints/talkmap-checkpoint.ipynb
jialeishen/academicpages.github.io
Regression Analysis: Seasonal Effects with Sklearn Linear Regression. In this notebook, you will build a SKLearn linear regression model to predict Yen futures ("settle") returns with *lagged* Yen futures returns.
from pathlib import Path import numpy as np import pandas as pd # Futures contract on the Yen-dollar exchange rate: # This is the continuous chain of the futures contracts that are 1 month to expiration yen_futures = pd.read_csv( Path("./data/yen.csv"), index_col="Date", infer_datetime_format=True, parse_dates=True ) yen_futures.head() # Trim the dataset to begin on January 1st, 1990 yen_futures = yen_futures.loc["1990-01-01":, :] yen_futures.head()
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Data Preparation: Returns
# Create a series using "Settle" price percentage returns, drop any nan"s, and check the results: # (Make sure to multiply the pct_change() results by 100) # In this case, you may have to replace inf, -inf values with np.nan"s yen_futures['Return'] = (yen_futures['Settle'].pct_change() *100) yen_futures = yen_futures.replace([np.inf, -np.inf], np.nan).dropna() yen_futures.tail()
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Lagged Returns
# Create a lagged return using the shift function yen_futures['Lagged_Return'] = yen_futures['Return'].shift() yen_futures = yen_futures.dropna() yen_futures.tail()
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Train Test Split
# Create a train/test split for the data using 2018-2019 for testing and the rest for training train = yen_futures[:'2017'] test = yen_futures['2018':] # Create four dataframes: # X_train (training set using just the independent variables), X_test (test set of just the independent variables) # Y_train (training set using just the "y" variable, i.e., "Futures Return"), Y_test (test set of just the "y" variable): X_train = train['Lagged_Return'].to_frame() X_test = test['Lagged_Return'].to_frame() y_train = train['Return'] y_test = test['Return'] X_train
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Linear Regression Model
# Create a Linear Regression model and fit it to the training data from sklearn.linear_model import LinearRegression # Fit a SKLearn linear regression using just the training set (X_train, Y_train): model = LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False) model.fit(X_train, y_train)
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Make predictions using the Testing Data. Note: We want to evaluate the model using data that it has never seen before, in this case: X_test.
# Make a prediction of "y" values using just the test dataset predictions = model.predict(X_test) # Assemble actual y data (Y_test) with predicted y data (from just above) into two columns in a dataframe: results = y_test.to_frame() results['Predicted Return'] = predictions results.head() # Plot the first 20 predictions vs the true values results[:20].plot(title='Return vs Predicted Return', subplots=True, figsize=(12,8))
_____no_output_____
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
Out-of-Sample Performance. Evaluate the model using "out-of-sample" data (X_test and y_test).
from sklearn.metrics import mean_squared_error, r2_score # Calculate the mean_squared_error (MSE) on actual versus predicted test "y" mse = mean_squared_error(results['Return'], results['Predicted Return']) # Using that mean-squared-error, calculate the root-mean-squared error (RMSE): rmse = np.sqrt(mse) print(f'Out-of-Sample Root Mean Squared Error (RMSE): {rmse}')
Out-of-Sample Root Mean Squared Error (RMSE): 0.4154832784856737
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
In-Sample Performance. Evaluate the model using in-sample data (X_train and y_train).
# Construct a dataframe using just the "y" training data: df_in_sample_results = y_train.to_frame() # Add a column of "in-sample" predictions to that dataframe: df_in_sample_results['In-Sample'] = model.predict(X_train) # Calculate in-sample mean_squared_error (for comparison to out-of-sample) mse = mean_squared_error(df_in_sample_results['Return'], df_in_sample_results['In-Sample']) # Calculate in-sample root mean_squared_error (for comparison to out-of-sample) rmse = np.sqrt(mse) # Output root mean squared error print(f'In-Sample Root Mean Squared Error (RMSE): {rmse}')
In-Sample Root Mean Squared Error (RMSE): 0.5963660785073426
MIT
regression_analysis.ipynb
jonowens/a_yen_for_the_future
DAT210x - Programming with Python for DS, Module5 - Lab6
import random, math import pandas as pd import numpy as np import scipy.io from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn import manifold from sklearn.neighbors import KNeighborsClassifier from mpl_toolkits.mplot3d import Axes3D import matplotlib import matplotlib.pyplot as plt matplotlib.style.use('ggplot') # Look Pretty # Leave this alone until indicated: Test_PCA = False
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
A Convenience Function This method is for your visualization convenience only. You aren't expected to know how to put this together yourself, although you should be able to follow the code by now:
def Plot2DBoundary(DTrain, LTrain, DTest, LTest): # The dots are training samples (img not drawn), and the pics are testing samples (images drawn) # Play around with the K values. This is very controlled dataset so it should be able to get perfect classification on testing entries # Play with the K for isomap, play with the K for neighbors. fig = plt.figure() ax = fig.add_subplot(111) ax.set_title('Transformed Boundary, Image Space -> 2D') padding = 0.1 # Zoom out resolution = 1 # Don't get too detailed; smaller values (finer rez) will take longer to compute colors = ['blue','green','orange','red'] # ------ # Calculate the boundaries of the mesh grid. The mesh grid is # a standard grid (think graph paper), where each point will be # sent to the classifier (KNeighbors) to predict what class it # belongs to. This is why KNeighbors has to be trained against # 2D data, so we can produce this countour. Once we have the # label for each point on the grid, we can color it appropriately # and plot it. x_min, x_max = DTrain[:, 0].min(), DTrain[:, 0].max() y_min, y_max = DTrain[:, 1].min(), DTrain[:, 1].max() x_range = x_max - x_min y_range = y_max - y_min x_min -= x_range * padding y_min -= y_range * padding x_max += x_range * padding y_max += y_range * padding # Using the boundaries, actually make the 2D Grid Matrix: xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution)) # What class does the classifier say about each spot on the chart? # The values stored in the matrix are the predictions of the model # at said location: Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # Plot the mesh grid as a filled contour plot: plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, z=-100) # ------ # When plotting the testing images, used to validate if the algorithm # is functioning correctly, size them as 5% of the overall chart size x_size = x_range * 0.05 y_size = y_range * 0.05 # First, plot the images in your TEST dataset img_num = 0 for index in LTest.index: # DTest is a regular NDArray, so you'll iterate over that 1 at a time. x0, y0 = DTest[img_num,0]-x_size/2., DTest[img_num,1]-y_size/2. x1, y1 = DTest[img_num,0]+x_size/2., DTest[img_num,1]+y_size/2. # DTest = our images isomap-transformed into 2D. But we still want # to plot the original image, so we look to the original, untouched # dataset (at index) to get the pixels: img = df.iloc[index,:].reshape(num_pixels, num_pixels) ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1), alpha=0.8) img_num += 1 # Plot your TRAINING points as well... as points rather than as images for label in range(len(np.unique(LTrain))): indices = np.where(LTrain == label) ax.scatter(DTrain[indices, 0], DTrain[indices, 1], c=colors[label], alpha=0.8, marker='o') # Plot plt.show()
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
The Assignment Use the same code from Module4/assignment4.ipynb to load up the `face_data.mat` file into a dataframe called `df`. Be sure to calculate the `num_pixels` value, and to rotate the images to being right-side-up instead of sideways. This was demonstrated in the [Lab Assignment 4](https://github.com/authman/DAT210x/blob/master/Module4/assignment4.ipynb) code.
# .. your code here .. mat = scipy.io.loadmat('Datasets/face_data.mat') df = pd.DataFrame(mat['images']).T num_images, num_pixels = df.shape num_pixels = int(math.sqrt(num_pixels)) # Rotate the pictures, so we don't have to crane our necks: for i in range(num_images): df.loc[i,:] = df.loc[i,:].reshape(num_pixels, num_pixels).T.reshape(-1)
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Load up your face_labels dataset. It only has a single column, and you're only interested in that single column. You will have to slice the column out so that you have access to it as a "Series" rather than as a "Dataframe". This was discussed in the "Slicin'" lecture of the "Manipulating Data" reading on the course website. Use an appropriate indexer to take care of that. Be sure to print out the labels and compare what you see to the raw `face_labels.csv` so you know you loaded it correctly.
# .. your code here .. face_labels = pd.read_csv('Datasets/face_labels.csv',header=None) label = face_labels.iloc[:, 0] len(label) df.shape df.head() label.head()
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Do `train_test_split`. Use the same code as on the EdX platform in the reading material, but set the random_state=7 for reproducibility, and the test_size to 0.15 (15%). Your labels are actually passed in as a series (instead of as an NDArray) so that you can access their underlying indices later on. This is necessary so you can find your samples in the original dataframe. The convenience methods we've written for you that handle drawing expect this, so that they can plot your testing data as images rather than as points:
# .. your code here .. x_train, x_test, y_train, y_test = train_test_split(df, label, test_size=0.15, random_state=7)
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Dimensionality Reduction
if Test_PCA: # INFO: PCA is used *before* KNeighbors to simplify your high dimensionality # image samples down to just 2 principal components! A lot of information # (variance) is lost during the process, as I'm sure you can imagine. But # you have to drop the dimension down to two, otherwise you wouldn't be able # to visualize a 2D decision surface / boundary. In the wild, you'd probably # leave in a lot more dimensions, which is better for higher accuracy, but # worse for visualizing the decision boundary; # # Your model should only be trained (fit) against the training data (data_train) # Once you've done this, you need use the model to transform both data_train # and data_test from their original high-D image feature space, down to 2D # TODO: Implement PCA here. ONLY train against your training data, but # transform both your training + test data, storing the results back into # data_train, and data_test. # .. your code here .. pca = PCA(n_components=2, svd_solver='full') pca.fit(x_train) data_train = pca.transform(x_train) data_test = pca.transform(x_test) else: # INFO: Isomap is used *before* KNeighbors to simplify your high dimensionality # image samples down to just 2 components! A lot of information has been is # lost during the process, as I'm sure you can imagine. But if you have # non-linear data that can be represented on a 2D manifold, you probably will # be left with a far superior dataset to use for classification. Plus by # having the images in 2D space, you can plot them as well as visualize a 2D # decision surface / boundary. In the wild, you'd probably leave in a lot more # dimensions, which is better for higher accuracy, but worse for visualizing the # decision boundary; # Your model should only be trained (fit) against the training data (data_train) # Once you've done this, you need use the model to transform both data_train # and data_test from their original high-D image feature space, down to 2D # TODO: Implement Isomap here. ONLY train against your training data, but # transform both your training + test data, storing the results back into # data_train, and data_test. # .. your code here .. iso = manifold.Isomap(n_neighbors=5, n_components=2) iso.fit(x_train) data_train = iso.transform(x_train) data_test = iso.transform(x_test)
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Implement `KNeighborsClassifier` here. You can use any K value from 1 through 20, so play around with it and attempt to get good accuracy. Fit the classifier against your training data and labels.
# .. your code here .. knn = KNeighborsClassifier(n_neighbors=3) knn = knn.fit(x_train, y_train) knn.score(x_test, y_test) # 2D-trained copy for the boundary plot model = KNeighborsClassifier(n_neighbors=3).fit(data_train, y_train)
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Calculate and display the accuracy of the testing set (data_test and label_test):
# .. your code here .. knn.score(x_test, y_test) scores = pd.DataFrame(columns=['n_neighbors', 'model_score']) type(scores); scores.dtypes; scores.shape; scores.head(3) for i in range(1, 21): # try K value from 1 through 20 in an attempt to find good accuracy score = KNeighborsClassifier(n_neighbors=i).fit(x_train, y_train).score(x_test, y_test) scores.loc[i-1] = [int(i), score] # or scores.loc[len(scores)] = [int(i), score] scores.model_score.unique() # | scores['model_score'] | scores.loc[:, 'model_score'] | scores.iloc[:, 1] scores.groupby('model_score').model_score.unique(); print(' ') # prints unique values with all decimal points scores.groupby('model_score').n_neighbors.unique(); print(' ') scores.groupby('model_score').n_neighbors.nunique() scores
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
Let's chart the combined decision boundary, the training data as 2D plots, and the testing data as small images so we can visually validate performance:
Plot2DBoundary(data_train, y_train, data_test, y_test)
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
After submitting your answers, experiment with using PCA instead of ISOMap. Are the results what you expected? Also try tinkering around with the test/train split percentage from 10-20%. Notice anything? A sketch of one way to run this experiment follows the cell below.
# .. your code changes above ..
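A sketch of one way to run the suggested experiment, reusing `df` and `label` from the cells above; the K values and reducer settings below simply mirror the earlier cells and are not prescribed by the assignment.

```python
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn import manifold
from sklearn.neighbors import KNeighborsClassifier

for test_size in (0.10, 0.15, 0.20):
    x_tr, x_te, y_tr, y_te = train_test_split(df, label, test_size=test_size, random_state=7)
    for name, reducer in [('PCA', PCA(n_components=2, svd_solver='full')),
                          ('Isomap', manifold.Isomap(n_neighbors=5, n_components=2))]:
        reducer.fit(x_tr)                  # fit the reducer on training images only
        d_tr = reducer.transform(x_tr)
        d_te = reducer.transform(x_te)
        acc = KNeighborsClassifier(n_neighbors=3).fit(d_tr, y_tr).score(d_te, y_te)
        print(f'{name:7s} test_size={test_size:.2f} accuracy={acc:.3f}')
```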
_____no_output_____
MIT
Module5/Module5 - Lab6.ipynb
jacburge/pythonfordatascience
PIC data
from astropy.constants import m_e, e, k_B k = k_B.value me = m_e.value q = e.value import numpy as np import matplotlib.pyplot as plt import json %matplotlib notebook from scipy.interpolate import interp1d from math import ceil plt.style.use("presentation") with open("NewPic1D.dat", "r") as f: dataPIC = json.load(f) # with open("PIC_data.dat", "r") as f: # dataPIC = json.load(f) with open("NewPIC_EVDFs.dat", "r") as f: data = json.load(f) # with open("PIC_EVDFs.dat", "r") as f: # data = json.load(f) print(data.keys()) print("~~~~~~~~~~~~~~~ \n") print(data["info"]) print("~~~~~~~~~~~~~~~ \n") print("Run disponibles") for k in ["0","1","2"]: run = data[k] print(k," p = ",run["p"], "mTorr") dx = dataPIC["0"]["dx"] k = '0' probnames = np.array(data[k]["probnames"]) prob_center = np.array(data[k]["prob_center"]) prob_y0 = np.array(data[k]["prob_y0"]) prob_y1 = np.array(data[k]["prob_y1"]) print(probnames) print(prob_center) dx = data[k]["dx"]*1000 def returnxy(pn, k="1"): a = np.array(data[k][pn]['absciss']) V = np.array(data[k][pn]['EVDF']) idenx = 1 x = a[:,idenx] x = x**2*np.sign(x)*me/q/2 y = V[:,idenx] index = np.argwhere(pn == probnames)[0][0] xcenter = prob_center[index] x0 = int(prob_y0[index]) x1 = int(prob_y1[index]) phi = np.array(dataPIC[k]["phi"]) pc = interp1d(np.arange(len(phi)),phi)(xcenter) p0 = phi[x0] p1 = phi[x1] # p = phi[int(xcenter)] return x, y, pc , p0, p1 # plot plt.figure(figsize=(4.5,4)) plt.subplots_adjust(left=0.17, bottom=0.17, right=0.99, top=0.925, wspace=0.05, hspace=0.25) ft = 14 s = 2.5 for Nprob in range(len(probnames)): x, y, phic, phi0, phi1 = returnxy(probnames[Nprob]) # x, y, phic = returnxy(probnames[Nprob]) y0sum = (y).max() T = np.sum(np.abs(x) * y)/y.sum()*2 plt.scatter(phic, T) phi = np.array(dataPIC[k]["phi"]) Te = np.array(dataPIC[k]["Te2"]) plt.plot(phi, Te,linewidth=s, alpha=0.7,ls="--" ) # plt.legend( fontsize=ft,loc=(1,0.1 )) plt.legend(loc = 'lower left', fontsize=11) plt.grid(alpha=0.5) plt.ylabel("Te", fontsize=ft) plt.xlabel("$\phi$ [V]", fontsize=ft)
/home/tavant/these/code/venv/stand/lib64/python3.7/site-packages/matplotlib/figure.py:2144: UserWarning: This figure was using constrained_layout==True, but that is incompatible with subplots_adjust and or tight_layout: setting constrained_layout==False. warnings.warn("This figure was using constrained_layout==True, " No handles with labels found to put in legend.
Unlicense
src/Chapitre3/figure/Figure_HeatFlux.ipynb
antoinetavant/PhD_thesis_manuscript
Heat flux from the EVDF
k = "0" Nprob = -1 x, y, phic, phi0, phi1 = returnxy(probnames[Nprob], k=k) plt.figure(figsize=(4.5,4.5)) plt.subplots_adjust(left=0.17, bottom=0.17, right=0.99, top=0.925, wspace=0.05, hspace=0.25) plt.plot(x,y) Nprob = 1 x, y, phic, phi0, phi1 = returnxy(probnames[Nprob], k=k) plt.plot(x,y) plt.yscale("log") plt.vlines([phic,phic*1.3], 0.001,1e5) plt.ylim(bottom=10) k = "0" Nprob = 2 x, y, phic, phi0, phi1 = returnxy(probnames[Nprob], k=k) plt.figure(figsize=(4.5,4.5)) plt.subplots_adjust(left=0.17, bottom=0.17, right=0.99, top=0.925, wspace=0.05, hspace=0.25) plt.plot(x,y) plt.yscale("log") plt.vlines([phic,phic*1.3], 0.001,1e5) plt.xlim(0, 20) plt.ylim(bottom=10) from scipy.integrate import simps def return_heat_flux(k="0", Nprob=2, cut=True): x, y, phic, phi0, phi1 = returnxy(probnames[Nprob], k=k) y /= y.sum() if cut: mask = (x>phic) & (x<=1.1*phic) else: mask = (x>phic) heatflux = simps(0.5*x[mask]*y[mask], x[mask]) flux = simps(y[mask], x[mask]) x, y, phic, phi0, phi1 = returnxy(probnames[9], k=k) mask = (x>0) T = np.sum(np.abs(x[mask]) * y[mask])/y[mask].sum()*2 return heatflux/flux/T plt.figure() for gamma, k in zip([1.6, 1.43, 1.41], ["0", "1", "2"]): plt.scatter(gamma, return_heat_flux(k, Nprob=3), c="k", label="WITHOUT HIGH ENERGY TAIL") plt.scatter(gamma, return_heat_flux(k, Nprob=3, cut=False), c="b", label="WITH HIGH ENERGY TAIL") plt.legend()
_____no_output_____
Unlicense
src/Chapitre3/figure/Figure_HeatFlux.ipynb
antoinetavant/PhD_thesis_manuscript
Capture Spectrum. Below we will generate plots for both the real and simulated capture spectra. If you are re-generating the plots, please be patient, as both load large amounts of data. Measured Spectra *(If you are not interested in the code itself, you can collapse it by selecting the cell and then clicking on the bar to its left. You will still be able to run it and view its output.)* Measured spectra and reconstructed PuBe rate. First, the reconstructed spectra were calculated by dividing the measured count rates by the livetimes and estimated efficiencies after applying quality cuts. Then, the background rate was subtracted from the overall rate, leaving the events due to the PuBe source. A simulated measured spectrum was built using Geant4. We then compared this to our actual data using MCMC sampling (`fitting` directory) as well as a simpler integral method (`Integral_Method.ipynb`).
#Import libraries and settings from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) exec(open("../python/nb_setup.py").read()) from constants import * import R68_load as r68 import R68_efficiencies as eff meas=r68.load_measured() import R68_spec_tools as spec from matplotlib import * style.use('../mplstyles/stylelib/standard.mplstyle') #Turn off expected warnings logging.getLogger('matplotlib.font_manager').disabled = True warnings.filterwarnings("ignore",category=RuntimeWarning) fig_w=7 #Used later for figure width # Binning setup Emax = 2000 #eVee Ebins=np.linspace(0,Emax,201) Ebins_ctr=(Ebins[:-1]+Ebins[1:])/2 Efit_min=50#[eVee] Efit_max=1750#[eVee] spec_bounds=(np.digitize(Efit_min,Ebins)-1,np.digitize(Efit_max,Ebins)-1) Ebins_ctr[slice(*spec_bounds)].shape #Measured N_meas_PuBe,_ = np.histogram(meas['PuBe']['E'],bins=Ebins) N_meas_Bkg,_ = np.histogram(meas['Bkg']['E'],bins=Ebins) #Count uncertainties are Poisson dN_meas_PuBe_Pois=np.sqrt(N_meas_PuBe) dN_meas_Bkg_Pois=np.sqrt(N_meas_Bkg) #Include uncertainty from efficiencies dN_meas_PuBe = N_meas_PuBe*np.sqrt( (dN_meas_PuBe_Pois/N_meas_PuBe)**2 + (eff.deff_write/eff.eff_write)**2 + (eff.dcutEffFit(Ebins_ctr)/eff.cutEffFit(Ebins_ctr))**2) dN_meas_Bkg = N_meas_Bkg*np.sqrt( (dN_meas_Bkg_Pois/N_meas_Bkg)**2 + (eff.deff_write_bkg/eff.eff_write_bkg)**2 + (eff.dcutEffFit_bkg(Ebins_ctr)/eff.cutEffFit_bkg(Ebins_ctr))**2) #Scaling factors tlive_ratio=meas['PuBe']['tlive']/meas['Bkg']['tlive'] writeEff_ratio=eff.eff_write/eff.eff_write_bkg dwriteEff_ratio=writeEff_ratio*np.sqrt( (eff.deff_write/eff.eff_write)**2 + (eff.deff_write_bkg/eff.eff_write_bkg)**2 ) cutEff_ratio=eff.cutEffFit(Ebins_ctr)/eff.cutEffFit_bkg(Ebins_ctr) dcutEff_ratio = cutEff_ratio*np.sqrt( (eff.dcutEffFit(Ebins_ctr)/eff.cutEffFit(Ebins_ctr))**2 + (eff.dcutEffFit_bkg(Ebins_ctr)/eff.cutEffFit_bkg(Ebins_ctr))**2 ) ratio=tlive_ratio*writeEff_ratio*cutEff_ratio dratio=ratio*np.sqrt( (dwriteEff_ratio/writeEff_ratio)**2 +(dcutEff_ratio/cutEff_ratio)**2 ) #Make sure any divide by 0s happened below threshold if (not np.all(np.isfinite(ratio[slice(*spec_bounds)]))) or (not np.all(np.isfinite(dratio[slice(*spec_bounds)]))): print('Error: Bad background scaling ratio in fit range.') #Bkg-subtracted measured PuBe signal N_bkg_scaled=N_meas_Bkg*ratio dN_bkg_scaled=N_bkg_scaled*np.sqrt( (dN_meas_Bkg/N_meas_Bkg)**2 + (dratio/ratio)**2 ) N_meas = N_meas_PuBe - N_bkg_scaled dN_meas = np.sqrt( dN_meas_PuBe**2 + dN_bkg_scaled**2 ) #All errors are symmetric here when using the conservative cut eff fits dN_meas_stat = np.sqrt( dN_meas_PuBe_Pois**2 + (dN_meas_Bkg_Pois*ratio)**2 ) Denom_PuBe = meas['PuBe']['tlive']*eff.eff_write*eff.cutEffFit(Ebins_ctr)*eff.trigEff(Ebins_ctr) R_meas_PuBe = N_meas_PuBe/Denom_PuBe Denom_Bkg = meas['Bkg']['tlive']*eff.eff_write_bkg*eff.cutEffFit_bkg(Ebins_ctr)*eff.trigEff(Ebins_ctr) R_meas_Bkg = N_meas_Bkg/Denom_Bkg R_meas = R_meas_PuBe - R_meas_Bkg dR_meas_stat_PuBe = R_meas_PuBe*dN_meas_PuBe_Pois/N_meas_PuBe dR_meas_PuBe = R_meas_PuBe*np.sqrt( (dN_meas_PuBe_Pois/N_meas_PuBe)**2 +\ (eff.deff_write/eff.eff_write)**2 +\ (eff.dcutEffFit(Ebins_ctr)/eff.cutEffFit(Ebins_ctr))**2 +\ (eff.dtrigEff(Ebins_ctr)/eff.trigEff(Ebins_ctr))**2 ) dR_meas_stat_Bkg = R_meas_Bkg*dN_meas_Bkg_Pois/N_meas_Bkg dR_meas_Bkg = R_meas_Bkg*np.sqrt( (dN_meas_Bkg_Pois/N_meas_Bkg)**2 +\ (eff.deff_write_bkg/eff.eff_write_bkg)**2 +\ (eff.dcutEffFit_bkg(Ebins_ctr)/eff.cutEffFit_bkg(Ebins_ctr))**2 +\ 
(eff.dtrigEff(Ebins_ctr)/eff.trigEff(Ebins_ctr))**2 ) dR_meas_stat = np.sqrt(dR_meas_stat_PuBe**2 + dR_meas_stat_Bkg**2) dR_meas = np.sqrt(dR_meas_PuBe**2 + dR_meas_Bkg**2) c_stat,dc_stat=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=False, doBurstLeaksyst=False, output='counts') c_syst,dc_syst=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=True, doBurstLeaksyst=False, output='counts') c_syst2,dc_syst2=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=True, doBurstLeaksyst=True, output='counts') r_stat,dr_stat=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=False, doBurstLeaksyst=False, output='reco-rate') r_syst,dr_syst=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=True, doBurstLeaksyst=False, output='reco-rate') r_syst2,dr_syst2=spec.doBkgSub(r68.load_measured(verbose=False), Ebins, Efit_min, Efit_max, doEffsyst=True, doBurstLeaksyst=True, output='reco-rate') fig,ax = plt.subplots(2,1,sharex=True,figsize=(16,16)) fill_noise=ax[1].axvspan(0,50,color='r', alpha=0.25, label='No efficiency data',zorder=0) cthresh=Ebins_ctr>=50 #Only plot hists above threshold #Raw Count histograms ax[0].set_title('Raw Counts, Statistical Uncertainties',size='40',pad='25') line_c_PuBe=ax[0].errorbar(Ebins_ctr,N_meas_PuBe,yerr=dN_meas_PuBe_Pois, drawstyle = 'steps-mid', linewidth=2) line_c_Bkg=ax[0].errorbar(Ebins_ctr,N_meas_Bkg,yerr=dN_meas_Bkg_Pois, drawstyle = 'steps-mid', linewidth=2) line_thresh=ax[0].axvline(50,linestyle='--',color='r', label='50 eV$_{ee}$ threshold',zorder=5) ax[0].set_xlim(0,5e2) #ax[0].set_yscale('log') ax[0].set_ylim(0,3e3) ax[0].set_ylabel('Counts/bin') ax[0].legend([line_c_PuBe, line_c_Bkg, line_thresh, fill_noise], [f"PuBe, run time = {meas['PuBe']['tlive']/3600:.1f} hrs", f"Bkg, run time = {meas['Bkg']['tlive']/3600:.1f} hrs", '50 eV$_{ee}$ threshold'],fontsize=30) #Reconstructed rate #Reverse errorbar order to be [lower,upper] ax[1].set_title('Reconstructed Background-Subtracted Rate',size='40',pad='25') line_r_syst=ax[1].errorbar(Ebins_ctr[cthresh],60*r_syst2[cthresh],yerr=(60*dr_syst2[::-1])[:,cthresh],drawstyle = 'steps-mid', linewidth=2, label='Stat + Syst') line_r_stat=ax[1].errorbar(Ebins_ctr[cthresh],60*r_stat[cthresh],yerr=60*dr_stat[::-1][:,cthresh],drawstyle = 'steps-mid', linewidth=2, label='Stat') line_thresh=ax[1].axvline(50,linestyle='--',color='r', label='50 eV$_{ee}$ threshold',zorder=5) fill_noise=ax[1].axvspan(0,50,color='r', alpha=0.25, label='No efficiency data',zorder=0) ax[1].legend([line_r_stat, line_r_syst, line_thresh, fill_noise], ['Stat', 'Stat + Syst', '50 eV$_{ee}$ threshold', 'No efficiency info']) ax[1].set_ylim(0,3) ax[1].set_xlim(0,500) ax[1].set_ylabel('Counts/min/bin') ax[1].set_xlabel('Energy [eV$_{ee}$]') plt.tight_layout() plt.savefig('../figures/meas_spec_reco_rate_pretty.pdf') plt.show()
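In compact form, the background-subtracted rate computed by the cell above is, per energy bin $i$ (this just restates the code, with statistical and efficiency uncertainties propagated in quadrature):

$$
R_i \;=\; \frac{N_i^{\mathrm{PuBe}}}{t_{\mathrm{live}}^{\mathrm{PuBe}}\,\varepsilon_{\mathrm{write}}\,\varepsilon_{\mathrm{cut}}(E_i)\,\varepsilon_{\mathrm{trig}}(E_i)}
\;-\;
\frac{N_i^{\mathrm{Bkg}}}{t_{\mathrm{live}}^{\mathrm{Bkg}}\,\varepsilon_{\mathrm{write}}^{\mathrm{Bkg}}\,\varepsilon_{\mathrm{cut}}^{\mathrm{Bkg}}(E_i)\,\varepsilon_{\mathrm{trig}}(E_i)}
$$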
_____no_output_____
MIT
0-Analysis/Capture_Spectrum.ipynb
villano-lab/nrSiCap
Overlaid histograms comparing the yielded energy PDFs for Sorensen and Lindhard models, including the resolution of the current detector (see `Calibration.ipynb`). The histograms are comprised of approximately simulated cascades. The orange (front) filled histogram represents the Lindhard model while the blue (back) filled histogram represents the Sorensen model. In the Sorensen model, many points are pushed to zero due to the presence of a cutoff energy, leading to a peak in the first bin that is not present in the Lindhard model. For both models, we use k = 0.178, and for the Sorensen model, we use q = 0.00075. The solid-line unfilled histogram represents the Lindhard model with one fifth of the detector’s resolution, and the dashed unfilled histogram represents the Sorensen model with one fifth of the detector’s resolution. This is not the data generated by Geant4 mentioned above, and is intended only to give an idea of what simulated data looks like. This data is instead generated using [a software package we developed for simulating nuclear recoils](https://github.com/villano-lab/nrCascadeSim). This package does not assume any particular source and does not rely on the same assumptions as Geant4.
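For orientation, the two ionization-yield models being compared are usually written as below; this is the standard parametrization (with $\epsilon$ the reduced recoil energy and $Z = 14$ for silicon), and the notebook's `R68_yield`/`lindhard` modules may differ in detail, so treat it as a sketch rather than their exact implementation.

$$
\epsilon = 11.5\,Z^{-7/3}\,E_r[\mathrm{keV}], \qquad
g(\epsilon) = 3\epsilon^{0.15} + 0.7\epsilon^{0.6} + \epsilon
$$
$$
Y_{\mathrm{Lindhard}}(\epsilon) = \frac{k\,g(\epsilon)}{1 + k\,g(\epsilon)}, \qquad
Y_{\mathrm{Sorensen}}(\epsilon) = \max\!\left(\frac{k\,g(\epsilon)}{1 + k\,g(\epsilon)} - \frac{q}{\epsilon},\; 0\right)
$$

The $q/\epsilon$ term is what produces the cutoff energy, and hence the first-bin pile-up, described above (with $k = 0.178$ and $q = 0.00075$).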
#Import Libraries import uproot import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as mpatch plt.style.use('../mplstyles/stylelib/standard.mplstyle') from matplotlib.lines import Line2D from tabulate import tabulate import sys sys.path.append('../python') import nc_kinematics as nck import lindhard as lin import R68_yield as R68y from histogram_utils import histogramable as h #Select a file. file = '../data/longsim.root' #It's running, there's just a lot of data. Please be patient! real_Lind = np.ndarray.flatten(np.asarray(h(file)[0])) real_Sor = np.ndarray.flatten(np.asarray(h(file,model='Sorenson')[0])) small_Lind = np.ndarray.flatten(np.asarray(h(file,scalefactor=0.2)[0])) small_Sor = np.ndarray.flatten(np.asarray(h(file,model='Sorenson',scalefactor=0.2)[0])) real_Lind = real_Lind[real_Lind >= 0] real_Sor = real_Sor[real_Sor >= 0] small_Lind = small_Lind[small_Lind >= 0] small_Sor = small_Sor[small_Sor >= 0] def format_exponent(ax, axis='y'): # Change the ticklabel format to scientific format ax.ticklabel_format(axis=axis, style='sci', scilimits=(-2, 2)) # Get the appropriate axis if axis == 'y': ax_axis = ax.yaxis x_pos = 0.0 y_pos = 1.0 horizontalalignment='left' verticalalignment='bottom' else: ax_axis = ax.xaxis x_pos = 1.0 y_pos = -0.05 horizontalalignment='right' verticalalignment='top' # Run plt.tight_layout() because otherwise the offset text doesn't update plt.tight_layout() # Get the offset value offset = ax_axis.get_offset_text().get_text() if len(offset) > 0: # Get that exponent value and change it into latex format minus_sign = u'\u2212' expo = np.float(offset.replace(minus_sign, '-').split('e')[-1]) offset_text = r'x$\mathregular{10^{%d}}$' %expo # Turn off the offset text that's calculated automatically ax_axis.offsetText.set_visible(False) # Add in a text box at the top of the y axis ax.text(x_pos, y_pos, offset_text, transform=ax.transAxes, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment,fontsize=30) return ax fig, ax = plt.subplots(figsize=(16,12)) binsize = 8 #bin width in eVee bins = np.arange(0,620,binsize) plt.hist(small_Lind,alpha=0.7,label='Small Res (1/5, Lindhard)',histtype='step',edgecolor='black',density='True',linewidth=2,bins=bins) plt.hist(small_Sor,alpha=0.7,label='Small Res (1/5, Sorenson)',histtype='step',edgecolor='black',linestyle='--',density='True',linewidth=2,bins=bins) plt.hist(real_Sor,alpha=0.6,label='Sorenson',histtype='step',fill=True,density='True',bins=bins,linewidth=3,edgecolor='navy',color='C0') plt.hist(real_Lind,alpha=0.6,label='Lindhard',histtype='step',fill=True,density='True',bins=bins,linewidth=3,edgecolor='#a30',color='C1') plt.xlabel(r"Energy Yielded ($\mathrm{eV}_{\mathrm{ee}}$)") plt.ylabel("Probability Distribution (eVee$^{-1}$)")#Counts/(total counts * bin width)") ax = format_exponent(ax, axis='y') ax.tick_params(axis='both',which='major') plt.xlim([0,None]) plt.ylim([6e-13,6e-3]) #Make corner less awkward. Smallest starting value that will make the extra 0 go away #Legend LindPatch = mpatch.Patch(facecolor='C1',edgecolor='#a30',linewidth=3,label='Lindhard',alpha=0.6) SorPatch = mpatch.Patch(facecolor='C0',edgecolor='navy',linewidth=3,label='Sorenson',alpha=0.6) LindLine = Line2D([0],[0],alpha=0.7,color='black',label='Small Res (1/5, Lindhard)') SorLine = Line2D([0],[0],linestyle='--',alpha=0.7,color='black',label='Small Res (1/5, Sorenson)') plt.legend(handles=[LindPatch,SorPatch,LindLine,SorLine]) plt.savefig('../figures/SorVsLin.pdf') plt.show()
dict_keys(['xx', 'yy', 'ex', 'ey'])
MIT
0-Analysis/Capture_Spectrum.ipynb
villano-lab/nrSiCap
Statistical exploration for Bayesian analysis of PhIP-seq
import pandas as pd import numpy as np import scipy as sp import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline cpm = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/cpm.tsv', sep='\t', header=0, index_col=0) upper_bound = sp.stats.scoreatpercentile(cpm.values.ravel(), 99.9) upper_bound fig, ax = plt.subplots() _ = ax.hist(cpm.values.ravel(), bins=100, log=True) _ = ax.set(title='cpm') fig, ax = plt.subplots() _ = ax.hist(np.log10(cpm.values.ravel() + 0.5), bins=100, log=False) _ = ax.set(title='log10(cpm + 0.5)') fig, ax = plt.subplots() _ = ax.hist(np.log10(cpm.values.ravel() + 0.5), bins=100, log=True) _ = ax.set(title='log10(cpm + 0.5)')
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Plot only the lowest 99.9% of the data
fig, ax = plt.subplots() _ = ax.hist(cpm.values.ravel()[cpm.values.ravel() <= upper_bound], bins=range(100), log=False) _ = ax.set(xlim=(0, 60)) _ = ax.set(title='trimmed cpm') trimmed_cpm = cpm.values.ravel()[cpm.values.ravel() <= upper_bound] trimmed_cpm.mean(), trimmed_cpm.std() means = cpm.apply(lambda x: x[x <= upper_bound].mean(), axis=1, raw=True) _, edges = np.histogram(means, bins=[sp.stats.scoreatpercentile(means, p) for p in np.linspace(0, 100, 10)]) def plot_hist(ax, a): h, e = np.histogram(a, bins=100, range=(0, upper_bound), density=True) ax.hlines(h, e[:-1], e[1:]) for i in range(len(edges[:-1])): left = edges[i] right = edges[i + 1] rows = (means >= left) & (means <= right) values = cpm[rows].values.ravel() fig, ax = plt.subplots() plot_hist(ax, values) ax.set(xlim=(0, 50), title='mean in ({}, {})'.format(left, right))
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Do the slices look Poisson?
a = np.random.poisson(8, 10000) fig, ax = plt.subplots() plot_hist(ax, a) ax.set(xlim=(0, 50))
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
For the most part. Maybe try NegBin just in case; a quick sketch of such a check follows below. What does the distribution of the trimmed means look like?
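A sketch of the NegBin check mentioned above (not in the original notebook): compare a Poisson and a moment-matched negative binomial on one of the mean-slices; `values` here is assumed to be the slice left over from the loop a few cells up.

```python
import numpy as np
from scipy import stats

counts = np.round(values).astype(int)   # nbinom/poisson are defined on integer counts
mu, var = counts.mean(), counts.var()
if var > mu:                            # NegBin only makes sense if overdispersed
    r = mu**2 / (var - mu)              # moment-matched size parameter
    p = mu / var                        # moment-matched success probability
    print('Poisson log-likelihood:', stats.poisson.logpmf(counts, mu).sum())
    print('NegBin  log-likelihood:', stats.nbinom.logpmf(counts, r, p).sum())
else:
    print('No overdispersion relative to Poisson in this slice.')
```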
fig, ax = plt.subplots() plot_hist(ax, means) ax.set(xlim=(0, 50)) a = np.random.gamma(1, 10, 10000) fig, ax = plt.subplots() plot_hist(ax, a) ax.set(xlim=(0, 50)) means.mean()
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Following Anders and Huber, _Genome Biology_ 2010, compute some of their stats. Compute size factors.
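For reference, the Anders–Huber size factor for sample $j$, given counts $k_{ij}$ over clones $i$ and $m$ samples, is the median ratio to the per-clone geometric mean; the cell below implements the equivalent log-space form, with a +0.5 pseudocount to handle zeros.

$$
\hat{s}_j \;=\; \operatorname*{median}_i \; \frac{k_{ij}}{\left(\prod_{v=1}^{m} k_{iv}\right)^{1/m}}
\;=\; \exp\!\left[\operatorname*{median}_i\left(\log k_{ij} - \frac{1}{m}\sum_{v=1}^{m}\log k_{iv}\right)\right]
$$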
s = np.exp(np.median(np.log(cpm.values + 0.5) - np.log(cpm.values + 0.5).mean(axis=1).reshape((cpm.shape[0], 1)), axis=0)) _ = sns.distplot(s) q = (cpm.values / s).mean(axis=1) fig, ax = plt.subplots() _ = ax.hist(q, bins=100, log=False) fig, ax = plt.subplots() _ = ax.hist(q, bins=100, log=True) w = (cpm.values / s).std(axis=1, ddof=1) fig, ax = plt.subplots() _ = ax.hist(w, bins=100, log=True) fig, ax = plt.subplots() _ = ax.scatter(q, w) _ = sns.lmplot('q', 'w', pd.DataFrame({'q': q, 'w': w})) list(zip(cpm.values.sum(axis=0), s)) s a = np.random.gamma(30, 1/30, 1000) sns.distplot(a)
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Proceeding with the following strategy/model. Trim the data to remove the top 0.1% of count values. Compute the mean of each row and use the means to fit a gamma distribution. Using these values, define a posterior on a rate for each clone, assuming Poisson stats for each cell. This means the posterior is also gamma distributed. Then compute the probability of seeing a more extreme value, weighted with the posterior on r_i.
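The conjugacy step asserted above ("the posterior is also gamma distributed") follows directly: with a $\mathrm{Gamma}(\alpha,\beta)$ prior (rate parametrization) on a clone's rate $r$ and $m$ i.i.d. Poisson counts $n_1,\dots,n_m$,

$$
p(r \mid n) \;\propto\; r^{\alpha-1} e^{-\beta r}\prod_{j=1}^{m} r^{n_j} e^{-r}
\;=\; r^{\alpha + \sum_j n_j - 1}\, e^{-(\beta + m) r}
\;\;\Longrightarrow\;\;
r \mid n \sim \mathrm{Gamma}\!\Big(\alpha + \sum_j n_j,\; \beta + m\Big),
\qquad
\mathbb{E}[r \mid n] = \frac{\alpha + \sum_j n_j}{\beta + m},
$$

which is exactly the `background_rates = (alpha + trimmed_sums) / (beta + trimmed_sizes)` line in the code below.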
import pystan cpm = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/cpm.tsv', sep='\t', header=0, index_col=0) upper_bound = sp.stats.scoreatpercentile(cpm.values, 99.9) trimmed_means = cpm.apply(lambda x: x[x <= upper_bound].mean(), axis=1, raw=True).values brm = pystan.StanModel(model_name='background_rates', file='/Users/laserson/repos/bamophip/background_rates.stan') data = { 'num_clones': trimmed_means.shape[0], 'trimmed_means': trimmed_means } br_fit = brm.sampling(data=data, iter=2000, chains=4) br_fit br_fit.plot() alpha, beta, _ = br_fit.get_posterior_mean().mean(axis=1) alpha, beta h, e = np.histogram(np.random.gamma(alpha, 1 / beta, 50000), bins='auto', density=True) fig, ax = plt.subplots() _ = ax.hist(trimmed_means, bins=100, normed=True) _ = ax.hlines(h, e[:-1], e[1:]) _ = ax.set(xlim=(0, 50)) # assumes the counts for each clone are Poisson distributed with the learned Gamma prior # Therefore, the posterior is Gamma distributed, and we use the expression for its expected value trimmed_sums = cpm.apply(lambda x: x[x <= upper_bound].sum(), axis=1, raw=True).values trimmed_sizes = cpm.apply(lambda x: (x <= upper_bound).sum(), axis=1, raw=True).values background_rates = (alpha + trimmed_sums) / (beta + trimmed_sizes) # mlxp is "minus log 10 pval" mlxp = [] for i in range(cpm.shape[0]): mlxp.append(-sp.stats.poisson.logsf(cpm.values[i], background_rates[i]) / np.log(10)) mlxp = np.asarray(mlxp) fig, ax = plt.subplots() h, e = np.histogram(10**(-mlxp.ravel()), bins='auto') ax.hlines(h, e[:-1], e[1:]) ax.set(xlim=(0, 1)) fig, ax = plt.subplots() finite = np.isfinite(mlxp.ravel()) _ = ax.hist(mlxp.ravel()[finite], bins=100, log=True) fig, ax = plt.subplots() finite = np.isfinite(mlxp.ravel()) _ = ax.hist(np.log10(mlxp.ravel()[finite] + 0.5), bins=100, log=True) old_pvals = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/pvals.tsv', sep='\t', header=0, index_col=0) fig, ax = plt.subplots() h, e = np.histogram(10**(-old_pvals.values.ravel()), bins='auto') ax.hlines(h, e[:-1], e[1:]) ax.set(xlim=(0, 1)) (old_pvals.values.ravel() > 10).sum() (mlxp > 10).sum() len(mlxp.ravel())
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Can we use scipy's MLE for the gamma parameters instead?
sp.stats.gamma.fit(trimmed_means) fig, ax = plt.subplots() _ = ax.hist(sp.stats.gamma.rvs(a=0.3387, loc=0, scale=3.102, size=10000), bins=100) _ = ax.set(xlim=(0, 50))
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Hmmm...doesn't appear to get the correct solution. Alternatively, let's try optimizing the log likelihood ourselves
pos = trimmed_means > 0 n = len(trimmed_means) s = trimmed_means[pos].sum() sl = np.log(trimmed_means[pos]).sum() def ll(x): return -1 * (n * x[0] * np.log(x[1]) - n * sp.special.gammaln(x[0]) + (x[0] - 1) * sl - x[1] * s) param = sp.optimize.minimize(ll, np.asarray([2, 1]), bounds=[(np.nextafter(0, 1), None), (np.nextafter(0, 1), None)]) param param.x
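A possible explanation for the earlier failure (an assumption, not verified in the original notebook): `scipy.stats.gamma.fit` also fits the location parameter by default, which can pull the shape and scale far from the two-parameter MLE. Pinning the location to zero should land close to the hand-optimized result above.

```python
from scipy import stats

pos_means = trimmed_means[trimmed_means > 0]            # match the zero-exclusion used in the MLE above
shape, loc, scale = stats.gamma.fit(pos_means, floc=0)  # floc=0 -> two-parameter gamma MLE
print(shape, loc, 1.0 / scale)                          # compare with param.x = (alpha, beta); beta = 1/scale
```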
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
SUCCESS! Do the p-values have a correlation with the peptide abundance?
mlxp = pd.read_csv('/Users/laserson/tmp/phip_analysis/sjogrens/mlxp.tsv', sep='\t', index_col=0, header=0) inputs = pd.read_csv('/Users/laserson/repos/phage_libraries_private/human90/inputs/human90-larman1-input.tsv', sep='\t', index_col=0, header=0) m = pd.merge(mlxp, inputs, left_index=True, right_index=True) sample = 'Sjogrens.serum.Sjogrens.FS12-03967.20A20G.1' sp.stats.pearsonr(10**(-m[sample]), m['input']) sp.stats.spearmanr(10**(-m[sample]), m['input']) fig, ax = plt.subplots() _ = ax.scatter(10**(-m[sample]), m['input']) fig, ax = plt.subplots() _ = ax.scatter(m[sample], m['input']) h, xe, ye = np.histogram2d(m[sample], m['input'], bins=100) fig, ax = plt.subplots() _ = ax.imshow(h) np.histogram2d
_____no_output_____
Apache-2.0
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
lasersonlab/phip-stat
Intent Recognition with BERT using Keras and TensorFlow 2
!nvidia-smi !pip install tensorflow-gpu >> /dev/null !pip install --upgrade grpcio >> /dev/null !pip install tqdm >> /dev/null !pip install bert-for-tf2 >> /dev/null !pip install sentencepiece >> /dev/null import os import math import datetime from tqdm import tqdm import pandas as pd import numpy as np import tensorflow as tf from tensorflow import keras import bert from bert import BertModelLayer from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights from bert.tokenization.bert_tokenization import FullTokenizer import seaborn as sns from pylab import rcParams import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator from matplotlib import rc from sklearn.metrics import confusion_matrix, classification_report %matplotlib inline %config InlineBackend.figure_format='retina' sns.set(style='whitegrid', palette='muted', font_scale=1.2) HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"] sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE)) rcParams['figure.figsize'] = 12, 8 RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) tf.random.set_seed(RANDOM_SEED)
_____no_output_____
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Data: The data contains various user queries categorized into seven intents. It is hosted on [GitHub](https://github.com/snipsco/nlu-benchmark/tree/master/2017-06-custom-intent-engines) and was first presented in [this paper](https://arxiv.org/abs/1805.10190).
!gdown --id 1OlcvGWReJMuyYQuOZm149vHWwPtlboR6 --output train.csv !gdown --id 1Oi5cRlTybuIF2Fl5Bfsr-KkqrXrdt77w --output valid.csv !gdown --id 1ep9H6-HvhB4utJRLVcLzieWNUSG3P_uF --output test.csv train = pd.read_csv("train.csv") valid = pd.read_csv("valid.csv") test = pd.read_csv("test.csv") train = train.append(valid).reset_index(drop=True) train.shape train.head() chart = sns.countplot(train.intent, palette=HAPPY_COLORS_PALETTE) plt.title("Number of texts per intent") chart.set_xticklabels(chart.get_xticklabels(), rotation=30, horizontalalignment='right');
_____no_output_____
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Intent Recognition with BERT
!wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip !unzip uncased_L-12_H-768_A-12.zip os.makedirs("model", exist_ok=True) !mv uncased_L-12_H-768_A-12/ model bert_model_name="uncased_L-12_H-768_A-12" bert_ckpt_dir = os.path.join("model/", bert_model_name) bert_ckpt_file = os.path.join(bert_ckpt_dir, "bert_model.ckpt") bert_config_file = os.path.join(bert_ckpt_dir, "bert_config.json")
_____no_output_____
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Preprocessing
class IntentDetectionData: DATA_COLUMN = "text" LABEL_COLUMN = "intent" def __init__(self, train, test, tokenizer: FullTokenizer, classes, max_seq_len=192): self.tokenizer = tokenizer self.max_seq_len = 0 self.classes = classes ((self.train_x, self.train_y), (self.test_x, self.test_y)) = map(self._prepare, [train, test]) print("max seq_len", self.max_seq_len) self.max_seq_len = min(self.max_seq_len, max_seq_len) self.train_x, self.test_x = map(self._pad, [self.train_x, self.test_x]) def _prepare(self, df): x, y = [], [] for _, row in tqdm(df.iterrows()): text, label = row[IntentDetectionData.DATA_COLUMN], row[IntentDetectionData.LABEL_COLUMN] tokens = self.tokenizer.tokenize(text) tokens = ["[CLS]"] + tokens + ["[SEP]"] token_ids = self.tokenizer.convert_tokens_to_ids(tokens) self.max_seq_len = max(self.max_seq_len, len(token_ids)) x.append(token_ids) y.append(self.classes.index(label)) return np.array(x), np.array(y) def _pad(self, ids): x = [] for input_ids in ids: input_ids = input_ids[:min(len(input_ids), self.max_seq_len - 2)] input_ids = input_ids + [0] * (self.max_seq_len - len(input_ids)) x.append(np.array(input_ids)) return np.array(x) tokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, "vocab.txt")) tokenizer.tokenize("I can't wait to visit Bulgaria again!") tokens = tokenizer.tokenize("I can't wait to visit Bulgaria again!") tokenizer.convert_tokens_to_ids(tokens) def create_model(max_seq_len, bert_ckpt_file): with tf.io.gfile.GFile(bert_config_file, "r") as reader: bc = StockBertConfig.from_json_string(reader.read()) bert_params = map_stock_config_to_params(bc) bert_params.adapter_size = None bert = BertModelLayer.from_params(bert_params, name="bert") input_ids = keras.layers.Input(shape=(max_seq_len, ), dtype='int32', name="input_ids") bert_output = bert(input_ids) print("bert shape", bert_output.shape) cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output) cls_out = keras.layers.Dropout(0.5)(cls_out) logits = keras.layers.Dense(units=768, activation="tanh")(cls_out) logits = keras.layers.Dropout(0.5)(logits) logits = keras.layers.Dense(units=len(classes), activation="softmax")(logits) model = keras.Model(inputs=input_ids, outputs=logits) model.build(input_shape=(None, max_seq_len)) load_stock_weights(bert, bert_ckpt_file) return model
_____no_output_____
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Training
classes = train.intent.unique().tolist() data = IntentDetectionData(train, test, tokenizer, classes, max_seq_len=128) data.train_x.shape data.train_x[0] data.train_y[0] data.max_seq_len model = create_model(data.max_seq_len, bert_ckpt_file) model.summary() model.compile( optimizer=keras.optimizers.Adam(1e-5), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")] ) log_dir = "log/intent_detection/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%s") tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir) history = model.fit( x=data.train_x, y=data.train_y, validation_split=0.1, batch_size=16, shuffle=True, epochs=5, callbacks=[tensorboard_callback] )
Train on 12405 samples, validate on 1379 samples Epoch 1/5 5392/12405 [============>.................] - ETA: 2:51 - loss: 1.4535 - acc: 0.7361
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Evaluation
%load_ext tensorboard %tensorboard --logdir log ax = plt.figure().gca() ax.xaxis.set_major_locator(MaxNLocator(integer=True)) ax.plot(history.history['loss']) ax.plot(history.history['val_loss']) plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['train', 'test']) plt.title('Loss over training epochs') plt.show(); ax = plt.figure().gca() ax.xaxis.set_major_locator(MaxNLocator(integer=True)) ax.plot(history.history['acc']) ax.plot(history.history['val_acc']) plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['train', 'test']) plt.title('Accuracy over training epochs') plt.show(); _, train_acc = model.evaluate(data.train_x, data.train_y) _, test_acc = model.evaluate(data.test_x, data.test_y) print("train acc", train_acc) print("test acc", test_acc) y_pred = model.predict(data.test_x).argmax(axis=-1) print(classification_report(data.test_y, y_pred, target_names=classes)) cm = confusion_matrix(data.test_y, y_pred) df_cm = pd.DataFrame(cm, index=classes, columns=classes) hmap = sns.heatmap(df_cm, annot=True, fmt="d") hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right') hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right') plt.ylabel('True label') plt.xlabel('Predicted label'); sentences = [ "Play our song now", "Rate this book as awful" ] pred_tokens = map(tokenizer.tokenize, sentences) pred_tokens = map(lambda tok: ["[CLS]"] + tok + ["[SEP]"], pred_tokens) pred_token_ids = list(map(tokenizer.convert_tokens_to_ids, pred_tokens)) pred_token_ids = map(lambda tids: tids +[0]*(data.max_seq_len-len(tids)),pred_token_ids) pred_token_ids = np.array(list(pred_token_ids)) predictions = model.predict(pred_token_ids).argmax(axis=-1) for text, label in zip(sentences, predictions): print("text:", text, "\nintent:", classes[label]) print()
_____no_output_____
MIT
19_intent_classification.ipynb
evergreenllc2020/Deep-Learning-For-Hackers
Which file types are there? 1. Connecting to the database: A connection to the Neo4j database is established.
import py2neo graph = py2neo.Graph(bolt=True, host='localhost', user='neo4j', password='neo4j')
_____no_output_____
Apache-2.0
6. Dateitypen.ipynb
softvis-research/BeLL
2. Cypher query: A query is sent to the database. The result is stored in a DataFrame (pandas).
import pandas as pd query ="MATCH (f:Git:File) RETURN f.relativePath as relativePath" df = pd.DataFrame(graph.run(query).data())
_____no_output_____
Apache-2.0
6. Dateitypen.ipynb
softvis-research/BeLL
3. Data preparation: As a check, the first five rows of the query result are displayed as a table.
df.head()
_____no_output_____
Apache-2.0
6. Dateitypen.ipynb
softvis-research/BeLL
The following code section extracts the different file types based on their file extension and counts how often each occurs. The file types are stored in the variable datatype and their frequencies in the variable frequency.
# Extract the file types from the DataFrame column. datatypes = df['relativePath'].str.rsplit('.', 1).str[1] # Count how often each file type occurs and store the counts in a Series object. series = datatypes.value_counts() # Create two lists from the Series object. datatype = list(series.index) frequency = list(series) # Create the category "andere" (other), which collects all file types that occur 20 times or fewer. andere = 0 for wert in frequency[:]: index = frequency.index(wert) if wert <= 20: andere += wert datatype.remove(datatype[index]) frequency.remove(wert) frequency.append(andere) datatype.append("andere") print(frequency) print(datatype)
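The same lumping of rare extensions into an "andere" (other) bucket can be expressed directly on the pandas Series, which avoids mutating the two lists while iterating. A small alternative sketch (assuming the same `df` with a `relativePath` column; not part of the original notebook):

```python
import pandas as pd

# Count occurrences of each file extension
counts = df['relativePath'].str.rsplit('.', n=1).str[1].value_counts()

# Keep the frequent types; lump everything with <= 20 occurrences into "andere"
frequent = counts[counts > 20]
frequency = frequent.tolist() + [int(counts[counts <= 20].sum())]
datatype = frequent.index.tolist() + ["andere"]
print(frequency)
print(datatype)
```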
[1383, 80, 41, 36, 21, 126] ['java', 'html', 'class', 'gif', 'txt', 'andere']
Apache-2.0
6. Dateitypen.ipynb
softvis-research/BeLL
4. Visualization: The data are visualized as a pie chart.
from IPython.display import display, HTML base_html = """ <!DOCTYPE html> <html> <head> <script type="text/javascript" src="http://kozea.github.com/pygal.js/javascripts/svg.jquery.js"></script> <script type="text/javascript" src="https://kozea.github.io/pygal.js/2.0.x/pygal-tooltips.min.js""></script> </head> <body> <figure> {rendered_chart} </figure> </body> </html> """ # Create the pie chart. import pygal pie_chart = pygal.Pie() pie_chart.title = 'Dateitypen' for einzelneDateitypen in datatype: index= datatype.index(einzelneDateitypen) anzahl=frequency[index] pie_chart.add(einzelneDateitypen, anzahl) display(HTML(base_html.format(rendered_chart=pie_chart.render(is_unicode=True))))
_____no_output_____
Apache-2.0
6. Dateitypen.ipynb
softvis-research/BeLL
Introduction to the Interstellar Medium, Jonathan Williams. Figure 6.3: portion of the Galactic plane in 21cm continuum showing bremsstrahlung and synchrotron sources.
import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from astropy.io import fits from astropy.wcs import WCS from astropy.visualization import (ImageNormalize, SqrtStretch, LogStretch, AsinhStretch) %matplotlib inline fig = plt.figure(figsize=(14,7.5)) hdu = fits.open('g330to340.i.fits') wcs1 = WCS(hdu[0]) ax1 = fig.add_subplot(111, projection=wcs1) im1 = hdu[0].data hd1 = hdu[0].header hdu.close() #print(hd1) imin, imax = 380, 730 imcrop = im1[:, imin:imax] #print(imcrop.min(),imcrop.max()) norm = ImageNormalize(imcrop, vmin=-0.15, vmax=2.0, stretch=AsinhStretch(a=0.1)) ax1.imshow(imcrop, cmap='gray', origin='lower', norm=norm) ax1.set_xlim(0,350) ax1.set_ylim(0,180) plt.plot([0,350], [90,90], ls='dashed', color='white', lw=2) ax1.text(82, 45, 'HII', color='white', fontsize=18, fontweight='normal') ax1.text(316, 97, 'SNR', color='white', fontsize=18, fontweight='normal') # scale bar dx = hd1['CDELT1'] #print(dx) # 40'' per pixel, make bar 1 deg = 90 pix xbar = 90 x0 = 250 x1 = x0 + xbar y0 = 12 dy = 2 ax1.plot([x0,x1],[y0,y0], 'w-', lw=2) ax1.plot([x0,x0],[y0-dy,y0+dy], 'w-', lw=2) ax1.plot([x1,x1],[y0-dy,y0+dy], 'w-', lw=2) # this crashes binder #mpl.rc('text', usetex=True) #mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"] #ax1.text(0.5*(x0+x1), y0+1.5*dy, r'$\boldsymbol{1^\circ}$', color='white', fontsize=24, fontweight='heavy', ha='center') # but this works ok ax1.text(0.5*(x0+x1), y0+1.5*dy, r'$1^\circ$', color='white', fontsize=24, fontweight='heavy', ha='center') ax1.text(0.03,0.91,'21cm continuum', {'color': 'w', 'fontsize': 28}, transform=ax1.transAxes) for i in (0,1): ax1.coords[i].set_ticks_visible(False) ax1.coords[i].set_ticklabel_visible(False) ax1.coords[i].set_ticks_visible(False) ax1.coords[i].set_ticklabel_visible(False) ax1.coords[i].set_axislabel('') ax1.coords[i].set_axislabel('') plt.tight_layout() plt.savefig('galactic_plane_continuum_21cm.pdf')
-0.15332128 7.7325406
CC0-1.0
ionized/galactic_plane_continuum_21cm.ipynb
CambridgeUniversityPress/IntroductionInterstellarMedium
TV Script Generation: In this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the Data: The data is already provided for you. You'll be using a subset of the original dataset; it consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc.
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper data_dir = './data/simpsons/moes_tavern_lines.txt' text = helper.load_data(data_dir) # Ignore notice, since we don't use it for analysing the data text = text[81:]
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Explore the Data: Play around with `view_sentence_range` to view different parts of the data.
view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) scenes = text.split('\n\n') print('Number of scenes: {}'.format(len(scenes))) sentence_count_scene = [scene.count('\n') for scene in scenes] print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene))) sentences = [sentence for scene in scenes for sentence in scene.split('\n')] print('Number of lines: {}'.format(len(sentences))) word_count_sentence = [len(sentence.split()) for sentence in sentences] print('Average number of words in each line: {}'.format(np.average(word_count_sentence))) print() print('The sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
Dataset Stats Roughly the number of unique words: 11492 Number of scenes: 262 Average number of sentences in each scene: 15.251908396946565 Number of lines: 4258 Average number of words in each line: 11.50164396430249 The sentences 0 to 10: Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink. Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch. Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately? Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick. Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self. Homer_Simpson: I got my problems, Moe. Give me another one. Moe_Szyslak: Homer, hey, you should not drink to forget your problems. Barney_Gumble: Yeah, you should only drink to enhance your social skills.
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Implement Preprocessing Functions: The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: Lookup Table and Tokenize Punctuation. Lookup Table: To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: a dictionary to go from the words to an id, which we'll call `vocab_to_int`, and a dictionary to go from the id to word, which we'll call `int_to_vocab`. Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`.
import numpy as np import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function text = list(set(text)) #text_id = range(len(text)) #int_to_vocab = dict(zip(text_id, text)) #vocab_to_int = dict(zip(text, text_id)) int_to_vocab = {word_i: word for word_i, word in enumerate(text)} vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()} return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Tokenize Punctuation: We'll be splitting the script into a word array using spaces as delimiters. However, punctuation like periods and exclamation marks makes it hard for the neural network to distinguish between the word "bye" and "bye!". Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols, where the symbol is the key and the value is the token: Period ( . ), Comma ( , ), Quotation Mark ( " ), Semicolon ( ; ), Exclamation mark ( ! ), Question mark ( ? ), Left Parentheses ( ( ), Right Parentheses ( ) ), Dash ( -- ), Return ( \n ). This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenize dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function keys = ['.', ',', '"', ';', '!', '?', '(', ')', '--','\n'] values = ['||Period||','||Comma||','||Quotation_Mark||','||Semicolon||','||Exclamation_mark||','||Question_mark||','||Left_Parentheses||','||Right_Parentheses||','||Dash||','||Return||'] token_lookup = dict(zip(keys,values)) return token_lookup """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Preprocess all the data and save it: Running the code cell below will preprocess all the data and save it to file.
""" DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Check Point: This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Build the Neural Network: You'll build the components necessary to build an RNN by implementing the following functions below: get_inputs, get_init_cell, get_embed, build_rnn, build_nn, and get_batches. Check the Version of TensorFlow and Access to GPU.
""" DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
TensorFlow Version: 1.0.0
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Input: Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders: an input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter, a targets placeholder, and a learning rate placeholder. Return the placeholders in the following tuple `(Input, Targets, LearningRate)`.
def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. :return: Tuple (input, targets, learning rate) """ # TODO: Implement Function Input = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input') Targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets') LearningRate = tf.placeholder(dtype=tf.float32, name='learning_rate') return Input, Targets, LearningRate """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Build RNN Cell and Initialize: Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell). The RNN size should be set using `rnn_size`. Initialize the cell state using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function, and apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity). Return the cell and initial state in the following tuple `(Cell, InitialState)`.
def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ # TODO: Implement Function #rnn_layers = 2 lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) Cell = tf.contrib.rnn.MultiRNNCell([lstm]) #initial_state = Cell.zero_state(batch_size=tf.placeholder(dtype=tf.int32, shape=[]), dtype=tf.float32) InitialState = tf.identity(Cell.zero_state(batch_size, tf.float32), name = 'initial_state') #InitialState = tf.identity(initial_state, name='initial_state') return Cell, InitialState """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Word Embedding: Apply embedding to `input_data` using TensorFlow. Return the embedded sequence.
def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ # TODO: Implement Function embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) embed = tf.nn.embedding_lookup(embedding, input_data) return embed """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Build RNN: You created an RNN Cell in the `get_init_cell()` function. Time to use the cell to create an RNN. Build the RNN using [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn), and apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity). Return the outputs and final state in the following tuple `(Outputs, FinalState)`.
def build_rnn(cell, inputs): """ Create a RNN using a RNN Cell :param cell: RNN Cell :param inputs: Input text data :return: Tuple (Outputs, Final State) """ # TODO: Implement Function Outputs, Final_State = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) FinalState = tf.identity(Final_State, name='final_state') return Outputs, FinalState """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_rnn(build_rnn)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Build the Neural Network: Apply the functions you implemented above to: apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function, build the RNN using `cell` and your `build_rnn(cell, inputs)` function, and apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs. Return the logits and final state in the following tuple `(Logits, FinalState)`.
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :param embed_dim: Number of embedding dimensions :return: Tuple (Logits, FinalState) """ # TODO: Implement Function embedding = get_embed(input_data, vocab_size, embed_dim) Outputs, FinalState = build_rnn(cell, embedding) Logits = tf.contrib.layers.fully_connected(Outputs, vocab_size, activation_fn=None) return Logits, FinalState """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_nn(build_nn)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Batches: Implement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements: the first element is a single batch of **input** with the shape `[batch size, sequence length]`, and the second element is a single batch of **targets** with the shape `[batch size, sequence length]`. If you can't fill the last batch with enough data, drop the last batch. For example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch, in this case `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ # TODO: Implement Function n_batches = len(int_text)//(batch_size*seq_length) input_batch = np.array(int_text[0: n_batches * batch_size * seq_length]) target_batch = np.array(int_text[1: n_batches * batch_size * seq_length]) target_batch = np.append(target_batch, int_text[0]) input_batchs = np.split(input_batch.reshape(batch_size, -1), n_batches, 1) target_batchs = np.split(target_batch.reshape(batch_size, -1), n_batches, 1) get_batches = list(zip(input_batchs, target_batchs)) return np.array(get_batches) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_batches(get_batches)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Neural Network Training Hyperparameters: Tune the following parameters: set `num_epochs` to the number of epochs, `batch_size` to the batch size, `rnn_size` to the size of the RNNs, `embed_dim` to the size of the embedding, `seq_length` to the length of the sequence, `learning_rate` to the learning rate, and `show_every_n_batches` to the number of batches after which the neural network should print progress.
# Number of Epochs num_epochs = 100 # Batch Size batch_size = 156 # RNN Size rnn_size = 600 # Embedding Dimension Size embed_dim = 500 # Sequence Length seq_length = 14 # Learning Rate learning_rate = 0.001 # Show stats for every n number of batches show_every_n_batches = 100 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save'
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Build the Graph: Build the graph using the neural network you implemented.
""" DON'T MODIFY ANYTHING IN THIS CELL """ from tensorflow.contrib import seq2seq train_graph = tf.Graph() with train_graph.as_default(): vocab_size = len(int_to_vocab) input_text, targets, lr = get_inputs() input_data_shape = tf.shape(input_text) cell, initial_state = get_init_cell(input_data_shape[0], rnn_size) logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function cost = seq2seq.sequence_loss( logits, targets, tf.ones([input_data_shape[0], input_data_shape[1]])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients)
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Train: Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
""" DON'T MODIFY ANYTHING IN THIS CELL """ batches = get_batches(int_text, batch_size, seq_length) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(num_epochs): state = sess.run(initial_state, {input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { input_text: x, targets: y, initial_state: state, lr: learning_rate} train_loss, state, _ = sess.run([cost, final_state, train_op], feed) # Show every <show_every_n_batches> batches if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0: print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( epoch_i, batch_i, len(batches), train_loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_dir) print('Model Trained and Saved')
Epoch 0 Batch 0/31 train_loss = 8.825 Epoch 3 Batch 7/31 train_loss = 5.159 Epoch 6 Batch 14/31 train_loss = 4.528 Epoch 9 Batch 21/31 train_loss = 4.046 Epoch 12 Batch 28/31 train_loss = 3.626 Epoch 16 Batch 4/31 train_loss = 3.317 Epoch 19 Batch 11/31 train_loss = 3.031 Epoch 22 Batch 18/31 train_loss = 2.765 Epoch 25 Batch 25/31 train_loss = 2.474 Epoch 29 Batch 1/31 train_loss = 2.178 Epoch 32 Batch 8/31 train_loss = 2.101 Epoch 35 Batch 15/31 train_loss = 1.774 Epoch 38 Batch 22/31 train_loss = 1.655 Epoch 41 Batch 29/31 train_loss = 1.581 Epoch 45 Batch 5/31 train_loss = 1.388 Epoch 48 Batch 12/31 train_loss = 1.260 Epoch 51 Batch 19/31 train_loss = 1.038 Epoch 54 Batch 26/31 train_loss = 1.010 Epoch 58 Batch 2/31 train_loss = 0.891 Epoch 61 Batch 9/31 train_loss = 0.773 Epoch 64 Batch 16/31 train_loss = 0.718 Epoch 67 Batch 23/31 train_loss = 0.642 Epoch 70 Batch 30/31 train_loss = 0.591 Epoch 74 Batch 6/31 train_loss = 0.534 Epoch 77 Batch 13/31 train_loss = 0.482 Epoch 80 Batch 20/31 train_loss = 0.438 Epoch 83 Batch 27/31 train_loss = 0.359 Epoch 87 Batch 3/31 train_loss = 0.369 Epoch 90 Batch 10/31 train_loss = 0.338 Epoch 93 Batch 17/31 train_loss = 0.300 Epoch 96 Batch 24/31 train_loss = 0.291 Model Trained and Saved
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Save Parameters: Save `seq_length` and `save_dir` for generating a new TV script.
""" DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params((seq_length, save_dir))
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Checkpoint
""" DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() seq_length, load_dir = helper.load_params()
_____no_output_____
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Implement Generate Functions. Get Tensors: Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names: "input:0", "initial_state:0", "final_state:0", and "probs:0". Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`.
def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ # TODO: Implement Function with loaded_graph.as_default() as g: InputTensor = loaded_graph.get_tensor_by_name("input:0") InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0") FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0") ProbsTensor = loaded_graph.get_tensor_by_name("probs:0") return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Choose Word: Implement the `pick_word()` function to select the next word using `probabilities`.
def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ # TODO: Implement Function pick_word = np.random.choice(len(int_to_vocab), 1, p=probabilities)[0] pick_word = int_to_vocab.get(pick_word) return pick_word """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word)
Tests Passed
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Generate TV Script: This will generate the TV script for you. Set `gen_length` to the length of the TV script you want to generate.
gen_length = 200 # homer_simpson, moe_szyslak, or Barney_Gumble prime_word = 'moe_szyslak' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_dir + '.meta') loader.restore(sess, load_dir) # Get Tensors from loaded model input_text, initial_state, final_state, probs = get_tensors(loaded_graph) # Sentences generation setup gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {input_text: np.array([[1]])}) # Generate sentences for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [probs, final_state], {input_text: dyn_input, initial_state: prev_state}) pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\n ', '\n') tv_script = tv_script.replace('( ', '(') print(tv_script)
moe_szyslak: ah-ha, big mistake pal! hey moe, can you be the best book on you could never! homer_simpson:(getting idea) but you're dea-d-d-dead.(three stooges scared sound) grampa_simpson:(upbeat) i guess despite all sweet music, but then we pour it a beer at half something. lenny_leonard: hey, homer. r. homer_simpson: moe, it's called! moe_szyslak: guys, i'm gonna let him want to go to my dad homer_simpson:(to moe) thirty cases of cough syrup. sign in the way. barney_gumble: yeah, that's probably what i look at you, i'm too? moe_szyslak: oh, here. the audience is still love over. moe's_thoughts: this is kent brockman. and it begins," dear is to that! moe_szyslak:(laughs) if you want to be back. voice: excuse me, so you can either sit here in the back of my cruiser. homer_simpson: well if i only got their secrets. lenny_leonard:(amiable) amanda
MIT
tv-script-generation/dlnd_tv_script_generation.ipynb
duozhanggithub/Deep-Learning
Model metrics before removing outliers
reg = LazyRegressor() X = data.drop(columns = ["SalePrice"]) y = data["SalePrice"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42) models, _ = reg.fit(X_train, X_test, y_train, y_test) models
100%|██████████| 43/43 [00:36<00:00, 1.19it/s]
MIT
house_prices/analysis12.ipynb
randat9/House_Prices
Removing outliers
nan_columns = {column: data[column].isna().sum() for column in data.columns if data[column].isna().sum() > 0} nan_columns data["PoolQC"].sample(10) ordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC'] outlier_removed_data = remove_outliers(data_no_empty_features, method="IsolationForest", threshold=0.1, model_kwargs = {}) reg = LazyRegressor() X = outlier_removed_data.drop(columns = ["SalePrice"]) y = outlier_removed_data["SalePrice"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) models, _ = reg.fit(X_train, X_test, y_train, y_test) models
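Note that `remove_outliers` and `data_no_empty_features` are not defined in the cells shown here; they presumably come from elsewhere in the repository. As a rough idea only, an IsolationForest-based helper with a similar signature might look like the sketch below; this is an assumption about its behaviour, not the project's actual implementation.

```python
from sklearn.ensemble import IsolationForest

def remove_outliers_sketch(df, threshold=0.1, model_kwargs=None):
    # Fit an IsolationForest on the numeric columns and keep only inliers
    model_kwargs = model_kwargs or {}
    numeric = df.select_dtypes(include="number").fillna(0)
    iso = IsolationForest(contamination=threshold, **model_kwargs)
    labels = iso.fit_predict(numeric)  # -1 = outlier, 1 = inlier
    return df[labels == 1]
```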
_____no_output_____
MIT
house_prices/analysis12.ipynb
randat9/House_Prices
import pandas as pd from google.colab import drive drive.mount('/content/drive') %pwd %ls '/content/drive/My Drive/Machine Learning Final' pos_muts = pd.read_csv('/content/drive/My Drive/Machine Learning Final/H77_metadata.csv') freqs = pd.read_csv('/content/drive/My Drive/Machine Learning Final/HCV1a_TsMutFreq_195.csv') mut_rate = pd.read_csv('/content/drive/My Drive/Machine Learning Final/Geller.mutation.rates_update.csv') freqs.head() mut_rate.head() pos_muts.head() # Start Calculating costs
_____no_output_____
MIT
Big_Dreams.ipynb
Lore8614/Lore8614.github.io
Nested cross-validation: In this notebook, we show a pattern called **nested cross-validation** which should be used when you want to both evaluate a model and tune the model's hyperparameters. Cross-validation is a powerful tool to evaluate the statistical performance of a model. It is also used to select the best model from a pool of models. This pool of models can be the same family of predictor but with different parameters. In this case, we call this procedure **hyperparameter tuning**. We could also imagine that we would like to choose among heterogeneous models that will similarly use the cross-validation. Before we go into details regarding the nested cross-validation, we will first recall the pattern used to fine-tune a model's hyperparameters. Let's load the breast cancer dataset.
from sklearn.datasets import load_breast_cancer data, target = load_breast_cancer(return_X_y=True)
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
Now, we'll make a minimal example using the utility `GridSearchCV` to find the best parameters via cross-validation.
from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC param_grid = {"C": [0.1, 1, 10], "gamma": [.01, .1]} model_to_tune = SVC() search = GridSearchCV(estimator=model_to_tune, param_grid=param_grid, n_jobs=2) search.fit(data, target)
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
We recall that `GridSearchCV` will train a model with some specific parameters on a training set and evaluate it on a testing set. However, this evaluation is done via cross-validation using the `cv` parameter. This procedure is repeated for all possible combinations of parameters given in `param_grid`. The attribute `best_params_` will give us the best set of parameters that maximize the mean score on the internal test sets.
print(f"The best parameter found are: {search.best_params_}")
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
We can now show the mean score obtained, using the attribute `best_score_`.
print(f"The mean score in CV is: {search.best_score_:.3f}")
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
At this stage, one should be extremely careful using this score. The misinterpretation would be the following: since the score was computed on a test set, it could be considered our model's testing score. However, we should not forget that we used this score to pick the best model. It means that we used knowledge from the test set (i.e. test score) to decide our model's training parameters. Thus, this score is not a reasonable estimate of our testing error. Indeed, we can show that it will be too optimistic in practice. The good way is to use a "nested" cross-validation. We will use an inner cross-validation corresponding to the previous procedure shown to optimize the hyperparameters. We will also include this procedure within an outer cross-validation, which will be used to estimate the testing error of our tuned model. In this case, our inner cross-validation will always get the training set of the outer cross-validation, making it possible to compute the testing score on a completely independent set. We will show below how we can create such nested cross-validation and obtain the testing score.
from sklearn.model_selection import cross_val_score, KFold # Declare the inner and outer cross-validation inner_cv = KFold(n_splits=4, shuffle=True, random_state=0) outer_cv = KFold(n_splits=4, shuffle=True, random_state=0) # Inner cross-validation for parameter search model = GridSearchCV( estimator=model_to_tune, param_grid=param_grid, cv=inner_cv, n_jobs=2) # Outer cross-validation to compute the testing score test_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2) print(f"The mean score using nested cross-validation is: " f"{test_score.mean():.3f} +/- {test_score.std():.3f}")
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
In the example above, the reported score is more trustworthy and should be close to the expected statistical performance in production. We will illustrate the difference between the nested and non-nested cross-validation scores to show that the latter will be too optimistic in practice. In this regard, we will repeat the experiment several times and shuffle the data differently. Besides, we will store the scores obtained with and without the nested cross-validation.
test_score_not_nested = [] test_score_nested = [] N_TRIALS = 20 for i in range(N_TRIALS): inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring model = GridSearchCV(estimator=model_to_tune, param_grid=param_grid, cv=inner_cv, n_jobs=2) model.fit(data, target) test_score_not_nested.append(model.best_score_) # Nested CV with parameter optimization test_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2) test_score_nested.append(test_score.mean())
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
We can merge the data together and make a box plot of the two strategies.
import pandas as pd all_scores = { "Not nested CV": test_score_not_nested, "Nested CV": test_score_nested, } all_scores = pd.DataFrame(all_scores) import matplotlib.pyplot as plt color = {"whiskers": "black", "medians": "black", "caps": "black"} all_scores.plot.box(color=color, vert=False) plt.xlabel("Accuracy") _ = plt.title("Comparison of mean accuracy obtained on the test sets with\n" "and without nested cross-validation")
_____no_output_____
CC-BY-4.0
notebooks/cross_validation_nested.ipynb
nish2612/scikit-learn-mooc
![image.png](attachment:image.png)
(X_train,y_train),(X_test,y_test) = datasets.mnist.load_data() X_train.shape X_train = X_train.reshape(60000,28,28,1) X_train.shape X_train.shape plt.imshow(X_train[0]) X_train = X_train/255 X_test = X_test/255 mnist_cnn = models.Sequential([ layers.Conv2D(filters=10, kernel_size=(5,5), activation='relu',input_shape=(28,28,1)), layers.MaxPooling2D(2,2), # layers.Conv2D(filters=5, kernel_size=(3,3), activation='relu',input_shape=(28,28,1)), # layers.MaxPooling2D(2,2), layers.Flatten(), layers.Dense(50,activation='relu'), layers.Dense(10,activation='softmax') ]) mnist_cnn.compile( optimizer='adam', loss='sparse_categorical_crossentropy', #categories 1,2,3... sparse because output is integer metrics=['accuracy'] ) mnist_cnn.fit(X_train,y_train,epochs=10) X_test.shape X_test = X_test.reshape(10000,28,28,1) mnist_cnn.evaluate(X_test,y_test) y_pred = mnist_cnn.predict(X_test) y_pred_classes = [np.argmax(element) for element in y_pred] cm = confusion_matrix(y_test,y_pred_classes) cm import seaborn as sn plt.figure(figsize=(10,7)) sn.heatmap(cm,annot=True,fmt='d') plt.xlabel('Predicted') plt.ylabel('Truth')
_____no_output_____
MIT
code/8_CNN_cifar10_mnist.ipynb
Akshatha-Jagadish/DL_topics
EDA for Import and Export Trade Volumes: Binational trade relationship between Mexico and the United States
#import key libraries import pandas as pd import matplotlib.pyplot as plt %matplotlib inline
_____no_output_____
MIT
jupyter-notebook/eda_import_exports.ipynb
NanceCA/binational-trade-volumes
Dataset 1: General Imports from Mexico to the United States
imports = pd.read_csv("./data/usitc/total-imports-mx2us.csv") ## data to be read includes the customs value of the import and the year imports.shape imports.head() #note that the customs_value and the dollar_amount are the same just different data types list(imports.columns) imports['imports'].describe() imports['dollar_amount'].describe() imports['customs_value'].plot(kind="bar") ## confirming that the data is linear plt.scatter(imports["year"],imports['customs_value'],color="blue") plt.title('Imports from Mexico to the US, Annual') plt.xlabel('year') plt.ylabel('customs value e11') plt.show() ##amazing! Looks pretty linear to me
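Since linearity is judged by eye in the scatter plot above, a quick least-squares fit can quantify it. A small sketch (not part of the original notebook) using the same `imports` DataFrame:

```python
from scipy import stats

# Fit customs value against year and report the goodness of fit
fit = stats.linregress(imports['year'], imports['customs_value'])
print(f"slope = {fit.slope:.3e} USD/year, r^2 = {fit.rvalue**2:.3f}")
```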
_____no_output_____
MIT
jupyter-notebook/eda_import_exports.ipynb
NanceCA/binational-trade-volumes
Dataset 2: Exports from US to Mexico
exports = pd.read_csv("./data/usitc/total-exports-us2mx.csv") exports.shape exports.head() list(exports.columns) exports['exports'].describe() plt.scatter(exports["year"],exports['exports'],color="green") plt.title('Exports from US to Mexico, Annual') plt.xlabel('year') plt.ylabel('FAS Value e11') plt.show() ##generally pretty linear ## Combining both exports and imports ##combine both vectors on one graph plt.plot(exports["year"],exports['exports'],color="green") plt.scatter(imports["year"],imports['imports'],color="blue") plt.title("Plotting imports and exports") plt.xlabel("Year") plt.ylabel("Value") plt.legend() plt.show()
_____no_output_____
MIT
jupyter-notebook/eda_import_exports.ipynb
NanceCA/binational-trade-volumes
Data preprocessing
# imports year_var = list(imports['year']) print(year_var) dollar = list(imports["dollar_amount"]) print(dollar) def pre_process(year, dollar): print("[",year,",",dollar,"]",",") pre_process(1996, 2)
_____no_output_____
MIT
jupyter-notebook/eda_import_exports.ipynb
NanceCA/binational-trade-volumes
Running descriptive statistics
# Pulling in descriptive statistics on IMPORTS from scipy import stats stats.describe(ytrain_pred) imports['imports'].describe() exports["exports"].describe()
_____no_output_____
MIT
jupyter-notebook/eda_import_exports.ipynb
NanceCA/binational-trade-volumes
1: Introduction To The Dataset
data = open('US_births_1994-2003_CDC_NCHS.csv','r').read().split('\n') data[:10]
_____no_output_____
MIT
Explore U.S. Births/Basics.ipynb
vipmunot/Data-Science-Projects
2: Converting Data Into A List Of Lists
def read_csv(filename, header = False): final_list = [] if header == True: read_data = open(filename,'r').read().split('\n')[1:] else: read_data = open(filename,'r').read().split('\n') for item in read_data: int_fields = [] string_fields = item.split(',') for val in string_fields: int_fields.append(int(val)) final_list.append(int_fields) return(final_list) cdc_list = read_csv('US_births_1994-2003_CDC_NCHS.csv',header = True) cdc_list[:10]
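For comparison, Python's standard-library `csv` module can do the same parsing with less manual string handling. This is an alternative sketch, not part of the project's solution:

```python
import csv

def read_csv_stdlib(filename, header=False):
    with open(filename, 'r') as f:
        rows = list(csv.reader(f))
    if header:
        rows = rows[1:]  # drop the header row
    return [[int(value) for value in row] for row in rows if row]
```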
_____no_output_____
MIT
Explore U.S. Births/Basics.ipynb
vipmunot/Data-Science-Projects
3: Calculating Number Of Births Each Month
def month_births(data): births_per_month = {} for item in data: if item[1] in births_per_month.keys(): births_per_month[item[1]] += item[4] else: births_per_month[item[1]] = item[4] return(births_per_month) cdc_month_births = month_births(cdc_list) cdc_month_births def dow_births(data): births_per_dow = {} for item in data: if item[3] in births_per_dow.keys(): births_per_dow[item[3]] += item[4] else: births_per_dow[item[3]] = item[4] return(births_per_dow) cdc_day_births = dow_births(cdc_list) cdc_day_births
_____no_output_____
MIT
Explore U.S. Births/Basics.ipynb
vipmunot/Data-Science-Projects
5: Creating A More General Function
def calc_counts(data,column): birth = {} for item in data: if item[column] in birth.keys(): birth[item[column]] += item[4] else: birth[item[column]] = item[4] return(birth) cdc_year_births = calc_counts(cdc_list, 0) cdc_month_births = calc_counts(cdc_list, 1) cdc_dom_births = calc_counts(cdc_list, 2) cdc_dow_births = calc_counts(cdc_list, 3) cdc_year_births cdc_month_births cdc_dom_births cdc_dow_births def min_max(dictionary): min_val = min(dictionary.items(), key=lambda k: k[1]) max_val = max(dictionary.items(), key=lambda k: k[1]) return("Minimum Value:%s Maximum Value:%s"%(min_val,max_val)) min_max(cdc_dow_births)
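The accumulate-into-a-dict pattern in `calc_counts` can also be written with `collections.defaultdict`, which removes the explicit key-existence check; an equivalent sketch:

```python
from collections import defaultdict

def calc_counts_dd(data, column):
    totals = defaultdict(int)
    for row in data:
        totals[row[column]] += row[4]  # row[4] holds the birth count
    return dict(totals)
```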
_____no_output_____
MIT
Explore U.S. Births/Basics.ipynb
vipmunot/Data-Science-Projects
Classes and Objects in Python Welcome! Objects in programming are like objects in real life. Like life, there are different classes of objects. In this notebook, we will create two classes called Circle and Rectangle. By the end of this notebook, you will have a better idea about : what a class is what an attribute is what a method is Don’t worry if you don’t get it the first time, as much of the terminology is confusing. Don’t forget to do the practice tests in the notebook. Table of Contents Introduction to Classes and Objects Creating a class Instances of a Class: Objects and Attributes Methods Creating a class Creating an instance of a class Circle The Rectangle Class Estimated time needed: 40 min Introduction to Classes and Objects Creating a Class The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blue print that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their attributes, they are variables. The class circle has the attribute radius and color, while the rectangle has the attribute height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary. Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width. Instances of a Class: Objects and Attributes An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object. Figure 2: Three instances of the class circle or three objects of type circle. The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow. Methods Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the "orange circle object", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object. Figure 3: Applying the method “add_radius” to the object orange circle object. Creating a Class Now we are going to create a class circle, but first, we are going to import a library to draw the objects:
# Import the library import matplotlib.pyplot as plt %matplotlib inline
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience
The first step in creating your own class is to use the class keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object: Figure 4: Three instances of the class circle or three objects of type circle. The next step is a special method called a constructor `__init__`, which is used to initialize the object. The inputs are data attributes. The term self contains all the attributes in the set. For example, self.color gives the value of the attribute color and self.radius will give you the radius of the object. We also have the method add_radius() with the parameter r; the method adds the value of r to the attribute radius. To access the radius we use the syntax self.radius. The labeled syntax is summarized in Figure 5: Figure 5: Labeled syntax of the object circle. The actual object is shown below. We include the method drawCircle to display the image of a circle. We set the default radius to 3 and the default colour to blue:
# Create a class Circle class Circle(object): # Constructor def __init__(self, radius=3, color='blue'): self.radius = radius self.color = color # Method def add_radius(self, r): self.radius = self.radius + r return(self.radius) # Method def drawCircle(self): plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color)) plt.axis('scaled') plt.show()
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience
Creating an instance of a class Circle Let’s create the object RedCircle of type Circle to do the following:
# Create an object RedCircle RedCircle = Circle(10, 'red')
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience
We can use the dir command to get a list of the object's methods. Many of them are default Python methods.
# Find out the methods can be used on the object RedCircle dir(RedCircle)
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience
We can look at the data attributes of the object:
# Print the object attribute radius RedCircle.radius # Print the object attribute color RedCircle.color
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience
We can change the object's data attributes:
# Set the object attribute radius RedCircle.radius = 1 RedCircle.radius
_____no_output_____
MIT
Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb
amitkrishna/IBM-DataScience