Dataset columns: markdown, code, output, license, path, repo_name.
Convert from Radians to Degrees
The above answers will be in radians; use the following code to convert them to degrees.
import cmath as c   # assumed to be imported earlier in the notebook (used as c.phase below)
import math as m    # assumed to be imported earlier in the notebook (used as m.degrees below)

# Substitute x_c and x_i as needed
z = c.phase(x_c)    # phase of the complex result, in radians
print("Angle in degrees =", m.degrees(z))
Angle in degrees = -81.07294513104007
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
Simple Circuit in Series
# For the following three cells, if the reactance is already given, replace "xind" or "xcap" with the corresponding j value
# Resistor value is overwritten from previous cells when changed here
# Not all simple circuits will have all three components. Modify as needed.
# Original formula - series_comb = r + ind + cap
r = 100                      # Resistor value
ind = 0 + xind*1j            # inductive reactance as a positive imaginary impedance
cap = 0 - xcap*1j            # capacitive reactance as a negative imaginary impedance
series_comb = r + ind + cap
print("Series Rectangular Form =", series_comb)
Series Rectangular Form = (100+6919.805079547168j)
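The cell above assumes `xind` and `xcap` were computed in an earlier cell of the notebook. A minimal sketch of how they might be obtained (the frequency and component values below are assumptions for illustration only):

```python
import math

f = 5e3      # source frequency in Hz (assumed)
L = 0.22     # inductance in H (assumed)
C = 1e-7     # capacitance in F (assumed)

xind = 2 * math.pi * f * L        # inductive reactance in ohms
xcap = 1 / (2 * math.pi * f * C)  # capacitive reactance in ohms
print("XL =", xind, "XC =", xcap)
```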
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
Simple Parallel Circuit - Product/Sum
# Product sum rule works only with 2 components # Original Formula - prod_sum = res*cap/(res + cap) ind = 0 + xind*1j cap = 0 + xcap*1j res = 100 prod_sum = res*cap/(res + cap) print("Product/sum Rectangular Form =",prod_sum)
Product/sum Rectangular Form = (97.59201358307332-15.329717646080926j)
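For two components the product/sum form is algebraically identical to the general reciprocal formula; a quick numerical check with assumed example impedances illustrates this:

```python
# Assumed example impedances: a 100-ohm resistor in parallel with a -200j-ohm capacitive reactance.
z1 = 100 + 0j
z2 = 0 - 200j

prod_sum = z1 * z2 / (z1 + z2)
reciprocal = 1 / (1/z1 + 1/z2)
print(prod_sum, reciprocal)  # the two results agree to floating-point precision
```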
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
Simple Parallel Circuit
# Use as many components as necessary # Original formula - parallel_comb = 1/(1/res + 1/ind + 1/cap) ind = 0 + xind*1j cap = 0 + xcap*1j res = 100 parallel_comb = 1/(1/res + 1/ind + 1/cap) print("Parallel Rectangular Form =",parallel_comb)
Parallel Rectangular Form = (98.04620253923555-13.840607701931122j)
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
Current Solver
# Make sure to use the parallel cell that IS NOT product/sum # Copy and paste cur_ind or cur_cap sections as necessary to account for all components. Some code modifaction/addition may be required. # This cell useful as is for one of each component. # Once previous cells are complete, this will populate automatically EXCEPT for E E = 10 #Equivalent Voltage Z_rect = parallel_comb Z_polar = c.polar(Z_rect) print("Z Polar = ",Z_polar,"\n") print(" Z Rectangular =",parallel_comb,"\n") cur_source = E/Z_rect cur_source_p = c.polar(cur_source) z=c.phase(cur_source) m.degrees(z) print("Source Current =",cur_source,"\n","Source Current, Polar =",cur_source_p,"\n","Angle = ",m.degrees(z),"\n") cur_cap = cur_source*Z_rect/cap cur_cap_p = c.polar(cur_cap) z=c.phase(cur_cap) m.degrees(z) print("Capacitor Current =",cur_cap,"\n","Capacitor Current, Polar =",cur_cap_p,"\n","Angle = ",m.degrees(z),"\n") cur_ind = cur_source*Z_rect/ind cur_ind_p = c.polar(cur_ind) z=c.phase(cur_ind) m.degrees(z) print("inductor Current =",cur_ind,"\n","Inductor Current, Polar =",cur_ind_p,"\n","Angle = ",m.degrees(z),"\n") cur_res = cur_source*Z_rect/res cur_res_p = c.polar(cur_res) z=c.phase(cur_res) m.degrees(z) print("Resistor Current =",cur_res,"\n","Resistor Current, Polar =",cur_res_p,"\n","Angle = ",m.degrees(z),"\n")
Z Polar = (99.01828242260899, -0.14023751838586943) Z Rectangular = (98.04620253923555-13.840607701931122j) Source Current = (0.1+0.014116413837030016j) Source Current, Polar = (0.1009914508244054, 0.14023751838586943) Angle = 8.035017932898603 Capacitor Current = (-0+0.015707963267948967j) Capacitor Current, Polar = (0.015707963267948967, 1.5707963267948966) Angle = 90.0 inductor Current = -0.0015915494309189533j Inductor Current, Polar = (0.0015915494309189533, -1.5707963267948966) Angle = -90.0 Resistor Current = (0.1+0j) Resistor Current, Polar = (0.1, 0.0) Angle = 0.0
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
Series-Parallel Circuits
# Organization cell for component values # Inductors z1 = 200*1j # Resistors z2 = 300 z3 = 270 #Capacitors z4 = -1500*1j # This cell is ambiguous with just z values to make it easy to modify. Keep track of z values. # Original Form of equation - parallel_react = 1/(1/z1+1/z2+1/(z3+z4)) parallel_react = 1/(1/z1+1/z2+1/(z3+z4)) parallel_polar = c.polar(parallel_react) print("Z Rectangular =",parallel_react,"\n","Z Polar =",parallel_polar)
Z Rectangular = (111.7846057266418+141.10138457782585j) Z Polar = (180.01499606210666, 0.9008118071374078)
MIT
Applications/ENGR 202 Solver.ipynb
smithrockmaker/ENGR213
TPU
AUTO = tf.data.experimental.AUTOTUNE # Detect hardware, return appropriate distribution strategy try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection. No parameters necessary if TPU_NAME environment variable is set. On Kaggle this is always the case. print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() # default distribution strategy in Tensorflow. Works on CPU and single GPU. print("REPLICAS: ", strategy.num_replicas_in_sync) # Data access GCS_DS_PATH = KaggleDatasets().get_gcs_path() from matplotlib import pyplot as plt img = plt.imread('/kaggle/input/zindi-disease/train/healthy_wheat/4YI63K.jpg') print(img.shape) plt.imshow(img) path='/kaggle/input/zindi-disease/' sub = pd.read_csv(path + 'sample_submission.csv') print(len(sub)) sub.head() dic = {} for _,_, filenames in os.walk(path + '/test/'): for file in filenames: name, extension = os.path.splitext(file) dic[name] = extension test = sub[['ID']].copy() test['ID']=test['ID'].apply(lambda x: x + dic[x]) test['ID']='/test/' + test['ID'] print(len(test)) test.head() plt.imshow(plt.imread(path + 'test/643083.JPG')) test_paths = test.ID.apply(lambda x: GCS_DS_PATH + x).values train=pd.DataFrame(columns=['ID','leaf_rust','stem_rust','healthy_wheat']) train.head() fn_hw = [] for _,_, filenames in os.walk(path + '/train/healthy_wheat/'): for filename in filenames: fn_hw.append('/train/healthy_wheat/' + filename) d_hw = {'ID': fn_hw, 'leaf_rust': 0, 'stem_rust':0, 'healthy_wheat':1} train=train.append(pd.DataFrame(d_hw)) print(len(train)) train.head() fn_lr = [] for _,_, filenames in os.walk(path + '/train/leaf_rust/'): for filename in filenames: fn_lr.append('/train/leaf_rust/' + filename) d_lr = {'ID': fn_lr, 'leaf_rust': 1, 'stem_rust':0, 'healthy_wheat':0} train=train.append(pd.DataFrame(d_lr)) print(len(train)) train.tail() fn_sr = [] for _,_, filenames in os.walk(path + '/train/stem_rust/'): for filename in filenames: fn_sr.append('/train/stem_rust/' + filename) d_sr = {'ID': fn_sr, 'leaf_rust': 0, 'stem_rust':1, 'healthy_wheat':0} train=train.append(pd.DataFrame(d_sr)) print(len(train)) train.tail() train_paths = train.ID.apply(lambda x: GCS_DS_PATH + x).values train_labels = train.loc[:, 'leaf_rust':].astype('int64').values train_labels type(train_labels[0][0]) nb_classes = 3 BATCH_SIZE = 8 * strategy.num_replicas_in_sync img_size = 768 EPOCHS = 40 def decode_image(filename, label=None, image_size=(img_size, img_size)): bits = tf.io.read_file(filename) image = tf.image.decode_jpeg(bits, channels=3) image = tf.cast(image, tf.float32) / 255.0 image = tf.image.resize(image, image_size) if label is None: return image else: return image, label def data_augment(image, label=None, seed=2020): image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_flip_up_down(image, seed=seed) if label is None: return image else: return image, label def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): # returns 3x3 transformmatrix which transforms indicies # CONVERT DEGREES TO RADIANS rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. 
# ROTATION MATRIX c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3] ) # SHEAR MATRIX c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape( tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3] ) # ZOOM MATRIX zoom_matrix = tf.reshape( tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3] ) # SHIFT MATRIX shift_matrix = tf.reshape( tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3] ) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def transform(image,label): # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3] # output - image randomly rotated, sheared, zoomed, and shifted DIM = img_size XDIM = DIM%2 #fix for size 331 rot = 15. * tf.random.normal([1],dtype='float32') shr = 5. * tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. h_shift = 16. * tf.random.normal([1],dtype='float32') w_shift = 16. * tf.random.normal([1],dtype='float32') # GET TRANSFORMATION MATRIX m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) # LIST DESTINATION PIXEL INDICES x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM ) y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] ) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack( [x,y,z] ) # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]),label train_dataset = ( tf.data.Dataset .from_tensor_slices((train_paths, train_labels)) .map(decode_image, num_parallel_calls=AUTO) .map(data_augment, num_parallel_calls=AUTO) .map(transform, num_parallel_calls=AUTO) .repeat() .shuffle(512) .batch(BATCH_SIZE) .prefetch(AUTO) ) test_dataset = ( tf.data.Dataset .from_tensor_slices(test_paths) .map(decode_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) ) LR_START = 0.00001 LR_MAX = 0.0001 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 25 LR_SUSTAIN_EPOCHS = 3 LR_EXP_DECAY = .8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x) for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) def get_model(): base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(img_size, img_size, 3)) x = base_model.output predictions = Dense(nb_classes, activation="softmax")(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope(): model = get_model() model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy']) %%time model.fit( train_dataset, steps_per_epoch=train_labels.shape[0] // BATCH_SIZE, callbacks=[lr_callback], epochs=EPOCHS ) %%time probs = model.predict(test_dataset) 
sub.loc[:, 'leaf_rust':] = probs sub.to_csv('submission.csv', index=False) sub.head()
_____no_output_____
MIT
Image Classification/CGIAR Computer Vision for Crop Disease/recongratulationsyoure3iniclrworkshopchallenge1/model4.ipynb
ZindiAfrica/Computer-Vision
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Find the highest product of three numbers in a list.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Is the input a list of integers? * Yes* Can we get negative inputs? * Yes* Can there be duplicate entires in the input? * Yes* Will there always be at least three integers? * No* Can we assume the inputs are valid? * No, check for None input* Can we assume this fits memory? * Yes Test Cases* None -> TypeError* Less than three ints -> ValueError* [5, -2, 3] -> -30* [5, -2, 3, 1, -1, 4] -> 60 Algorithm Brute force:Use three loops and multiple each numbers.Complexity:* Time: O(n^3)* Space: O(1) Sorting:Sort the list, multiply the last three elements.Complexity:* Time: O(n log(n))* Space: O(1) Greedy: 0 1 2 3 4 5[5, -2, 3, 1, -1, 4] -> 60max_prod_of_three = -30max_prod_of_two = -10max_num = 5min_prod_of_two = -10min_num = -2 0 1 2 3 4 5[5, -2, 3, 1, -1, 4] -> 60 ^max_prod_of_three = -30max_prod_of_two = 15max_num = 5min_prod_of_two = -10min_num = -2 0 1 2 3 4 5[5, -2, 3, 1, -1, 4] -> 60 ^max_prod_of_three = 15max_prod_of_two = 15max_num = 5min_prod_of_two = -10min_num = -2 0 1 2 3 4 5[5, -2, 3, 1, -1, 4] -> 60 ^max_prod_of_three = 15max_prod_of_two = 15max_num = 5min_prod_of_two = -10min_num = -2 0 1 2 3 4 5[5, -2, 3, 1, -1, 4] -> 60 ^max_prod_of_three = 60max_prod_of_two = 15max_num = 5min_prod_of_two = -10min_num = -2Complexity:* Time: O(n)* Space: O(1) Code
class Solution(object): def max_prod_three_nlogn(self, array): if array is None: raise TypeError('array cannot be None') if len(array) < 3: raise ValueError('array must have 3 or more ints') array.sort() product = 1 for item in array[-3:]: product *= item return product def max_prod_three(self, array): if array is None: raise TypeError('array cannot be None') if len(array) < 3: raise ValueError('array must have 3 or more ints') curr_max_prod_three = array[0] * array[1] * array[2] max_prod_two = array[0] * array[1] min_prod_two = array[0] * array[1] max_num = max(array[0], array[1]) min_num = min(array[0], array[1]) for i in range(2, len(array)): curr_max_prod_three = max(curr_max_prod_three, max_prod_two * array[i], min_prod_two * array[i]) max_prod_two = max(max_prod_two, max_num * array[i], min_num * array[i]) min_prod_two = min(min_prod_two, max_num * array[i], min_num * array[i]) max_num = max(max_num, array[i]) min_num = min(min_num, array[i]) return curr_max_prod_three
_____no_output_____
Apache-2.0
online_judges/prod_three/prod_three_solution.ipynb
sophomore99/PythonInterective
Unit Test
%%writefile test_prod_three.py from nose.tools import assert_equal, assert_raises class TestProdThree(object): def test_prod_three(self): solution = Solution() assert_raises(TypeError, solution.max_prod_three, None) assert_raises(ValueError, solution.max_prod_three, [1, 2]) assert_equal(solution.max_prod_three([5, -2, 3]), -30) assert_equal(solution.max_prod_three([5, -2, 3, 1, -1, 4]), 60) print('Success: test_prod_three') def main(): test = TestProdThree() test.test_prod_three() if __name__ == '__main__': main() %run -i test_prod_three.py
Success: test_prod_three
Apache-2.0
online_judges/prod_three/prod_three_solution.ipynb
sophomore99/PythonInterective
Unzipping and Zipping Files
As you are probably aware, files can be compressed to a zip format. Often people use special programs on their computer to unzip these files; luckily for us, Python can do the same task with just a few simple lines of code.
Create Files to Compress
# slashes may need to change for MacOS or Linux f = open("new_file.txt",'w+') f.write("Here is some text") f.close() # slashes may need to change for MacOS or Linux f = open("new_file2.txt",'w+') f.write("Here is some text") f.close()
_____no_output_____
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
Zipping Files
The [zipfile library](https://docs.python.org/3/library/zipfile.html) is built in to Python; we can use it to compress folders or files. To compress all files in a folder, just use the os.walk() method to iterate this process for all the files in a directory (a sketch of this is shown after the import below).
import zipfile
_____no_output_____
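A minimal sketch of the os.walk() approach mentioned above (the folder name is a hypothetical placeholder):

```python
import os
import zipfile

folder_to_zip = 'some_folder'  # hypothetical folder name
with zipfile.ZipFile('folder_archive.zip', 'w') as zf:
    # walk the folder and add every file it contains to the archive
    for root, dirs, files in os.walk(folder_to_zip):
        for filename in files:
            zf.write(os.path.join(root, filename), compress_type=zipfile.ZIP_DEFLATED)
```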
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
Create the zip file first, then write to it (the write step compresses the files).
comp_file = zipfile.ZipFile('comp_file.zip','w') comp_file.write("new_file.txt",compress_type=zipfile.ZIP_DEFLATED) comp_file.write('new_file2.txt',compress_type=zipfile.ZIP_DEFLATED) comp_file.close()
_____no_output_____
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
Extracting from Zip Files
We can easily extract files with either the extractall() method to get all the files, or the extract() method to grab only individual files (a short extract() sketch follows the cell below).
zip_obj = zipfile.ZipFile('comp_file.zip','r') zip_obj.extractall("extracted_content")
_____no_output_____
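A minimal sketch of the extract() method mentioned above, pulling out a single member (using one of the files written into the archive earlier):

```python
zip_obj = zipfile.ZipFile('comp_file.zip', 'r')
# extract just one member instead of the whole archive
zip_obj.extract('new_file.txt', path='extracted_single')
zip_obj.close()
```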
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
________ Using the shutil library
Often you don't want to extract or archive individual files from a .zip, but instead archive everything at once. The shutil library that is built into Python has easy-to-use commands for this:
import shutil
_____no_output_____
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
The shutil library can accept a format parameter; `format` is the archive format: one of "zip", "tar", "gztar", "bztar", or "xztar".
pwd
directory_to_zip = 'C:\\Users\\Marcial\\Pierian-Data-Courses\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules'

# Creating a zip archive
output_filename = 'example'

# Just fill in the output_filename and the directory to zip
# Note this won't run as is because the variables are undefined
shutil.make_archive(output_filename, 'zip', directory_to_zip)

# Extracting a zip archive
# Notice how the parameter/argument order is slightly different here
shutil.unpack_archive(output_filename, dir_for_extract_result, 'zip')
_____no_output_____
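As a small illustration of the `format` parameter, the same archiving call with one of the other supported formats (assuming `directory_to_zip` is defined as in the cell above) would be:

```python
import shutil

# same directory, archived as a gzipped tarball instead of a zip
shutil.make_archive('example_gztar', 'gztar', directory_to_zip)
```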
MIT
4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb
impastasyndrome/Lambda-Resource-Static-Assets
Causal Inference In Statistics - A Primer 3.1 Interventions Bruno Gonçalves www.data4sci.com @bgoncalves, @data4sci
from collections import Counter from pprint import pprint import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt from CausalModel import CausalModel import watermark %load_ext watermark %matplotlib inline
_____no_output_____
MIT
3.1 - Interventions.ipynb
m5l14i11/Causality
We start by printing out the versions of the libraries we're using, for future reference.
%watermark -n -v -m -g -iv
watermark 2.0.2 json 2.0.9 pandas 1.0.1 matplotlib 3.1.3 numpy 1.18.1 autopep8 1.5 Sun Oct 18 2020 CPython 3.7.3 IPython 6.2.1 compiler : Clang 4.0.1 (tags/RELEASE_401/final) system : Darwin release : 19.6.0 machine : x86_64 processor : i386 CPU cores : 8 interpreter: 64bit Git hash : 96fdced5915a840d6140e161f1d2827cf29f6e31
MIT
3.1 - Interventions.ipynb
m5l14i11/Causality
Load default figure style
plt.style.use('./d4sci.mplstyle') colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
_____no_output_____
MIT
3.1 - Interventions.ipynb
m5l14i11/Causality
Graph Surgery
G = CausalModel() G.add_causation('Ux', 'X') G.add_causation('Uy', 'Y') G.add_causation('Uz', 'Z') G.add_causation('Z', 'X') G.add_causation('Z', 'Y') G.pos = {'Z': (0, 1), 'X': (-1, 0), 'Y':(1, 0), 'Uz':(0, 2), 'Ux':(-1, 1), 'Uy': (1,1)} fig, ax = plt.subplots(1, figsize=(3, 2.5)) G.plot(ax=ax) G.save_model('dags/Primer.Fig.3.1.dot') G2 = G.copy() G2.dag.remove_edges_from(list(G.dag.in_edges('X'))) G2.dag.remove_node('Ux') del G2.pos['Ux'] fig, ax = plt.subplots(1, figsize=(3.2, 2.5)) G2.plot(ax=ax) G2.save_model('dags/Primer.Fig.3.2.dot') G = CausalModel() G.add_causation('Ux', 'X') G.add_causation('Uy', 'Y') G.add_causation('Uz', 'Z') G.add_causation('Z', 'X') G.add_causation('Z', 'Y') G.add_causation('X', 'Y') G.pos = {'Z': (0, 1), 'X': (-1, 0), 'Y':(1, 0), 'Uz':(0, 2), 'Ux':(-1, 1), 'Uy': (1,1)} fig, ax = plt.subplots(1, figsize=(3, 2.5)) G.plot(ax=ax) G.save_model('dags/Primer.Fig.3.3.dot') G2 = G.intervention_graph('X', drop_nodes=True) fig, ax = plt.subplots(1, figsize=(3, 2.5)) G2.plot(ax=ax) G.save_model('dags/Primer.Fig.3.4.dot')
_____no_output_____
MIT
3.1 - Interventions.ipynb
m5l14i11/Causality
Ex - GroupBy
Introduction: GroupBy can be summarized as Split-Apply-Combine. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Check out this [Diagram](http://i.imgur.com/yjNkiwL.png)
Step 1. Import the necessary libraries
import pandas as pd
_____no_output_____
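As a tiny illustration of Split-Apply-Combine (a toy example, not part of the exercise dataset):

```python
import pandas as pd

toy = pd.DataFrame({'continent': ['EU', 'EU', 'AS', 'AS'],
                    'servings': [10, 20, 5, 15]})
# split by continent, apply the mean to each group, combine the results
toy.groupby('continent')['servings'].mean()
```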
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv). Step 3. Assign it to a variable called drinks.
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv' drinks = pd.read_csv(url) drinks
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 4. Which continent drinks more beer on average?
drinks.groupby('continent').beer_servings.mean()
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 5. For each continent print the statistics for wine consumption.
drinks.groupby('continent').wine_servings.describe()
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 6. Print the mean alcohol consumption per continent for every column
drinks.groupby('continent').mean()
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 7. Print the median alcohol consumption per continent for every column
drinks.groupby('continent').median()
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Step 8. Print the mean, min and max values for spirit consumption. This time output a DataFrame
drinks.groupby('continent').spirit_servings.agg(['mean', 'min', 'max'])
_____no_output_____
BSD-3-Clause
03_Grouping/Alcohol_Consumption/Exercise.ipynb
coderhh/pandas_exercises
Week 3, Day 1 (Dataset Preparation and Arrangement)
> Welcome to the first day (Week 3) of the McE-51069 course.
- sticky_rank: 7
- toc: true
- badges: false
- comments: false
- categories: [deep_learning, computer_vision]

You can download resources for today from this [link](https://github.com/ytu-cvlab/mce-51069-week3-day1/archive/main.zip). We have also posted a [guide video](https://www.youtube.com/watch?v=hIaURCPvCf4) on downloading and accessing materials on our [youtube channel](https://www.youtube.com/channel/UCDFhKEbfpxKXVk4Mryh7yhA).

Datasets
Datasets come in different forms from various sources. So the question here is: what exactly is a dataset, and how do we handle datasets for machine learning? To experiment with different conditions, we must first know how to manipulate a dataset.

Brief Introduction to Pandas
Pandas is a Python library for data manipulation and analysis. In this section, we will give a brief introduction to pandas.
import pandas as pd import numpy as np import matplotlib.pyplot as plt import cv2 import math %matplotlib inline
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Pandas stores data in dataframe objects. We can assign a numpy array (or a list) to each column to create a dataframe.
#Create a dataframe names = ['Jack','Jean','Jennifer','Jimmy'] ages = np.array([23,22,24,21]) # print(type(names)) # print(type(ages)) df = pd.DataFrame({'name': names, 'age': ages, 'city': ['London', 'Berlin', 'New York', 'Sydney']},index=None) df.head() # df.style.hide_index()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Now, let's see some handy dataframe tricks.
df[['name','city']] df.info() # print(df.columns) # print(df.age)
<class 'pandas.core.frame.DataFrame'> RangeIndex: 4 entries, 0 to 3 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 name 4 non-null object 1 age 4 non-null int32 2 city 4 non-null object dtypes: int32(1), object(2) memory usage: 208.0+ bytes
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Now that we know how to create a dataframe, we can save the dataframe we created.
df.to_csv('Ages_and_cities.csv',index=False,header=True) df = pd.read_csv('Ages_and_cities.csv') df.head()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Understanding your dataset
In this section, we used the [Iris flowers dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), which contains petal and sepal measurements of three species of Iris flowers.

Three species of Iris flowers from the dataset ![](images/iris.png)

Sepal vs Petal
This dataset was introduced by biologist Ronald Fisher in his 1936 paper. The following figure explains the way length and width are measured for the petal and sepal of each flower. ![](images/sepal_petal.jpg) [Image source](https://www.oreilly.com/library/view/python-artificial-intelligence/9781789539462/assets/462dc4fa-fd62-4539-8599-ac80a441382c.png)

When we observe the dataset, we will discover that it has four features and three unique labels, one for each flower species.
df = pd.read_csv('iris_data.csv') df.head() # df.head(3) df.tail()
_____no_output_____
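To confirm the three unique labels mentioned above, a quick check (using the same `variety` column that the rest of the notebook relies on):

```python
print(df['variety'].unique())        # the three species names
print(df['variety'].value_counts())  # how many samples belong to each class
```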
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Slicing data Now that we understand our dataset, let's prepare to separate our data based on labels for unique visualization.
# df.loc[:3] df.loc[80:85,("sepal_length","variety")] # df.iloc[146:] # df.iloc[80:85,2:5] df.iloc[80:85,[0,4]] Se= df.loc[df.variety =='Setosa', :] Vc= df.loc[df.variety =='Versicolor', :] Vi= df.loc[df.variety =='Virginica', :] Vi.head()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Feature visualization
df = pd.read_csv('iris_data.csv') # df.dtypes
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
First, we will visualize each measurement with histograms to observe the output distribution for each class.
# df.hist("sepal.length",bins=15,edgecolor='black') plt.figure(figsize=(15,15)) plt.subplot(2, 2, 1) plt.hist(Se.sepal_length,bins=15,color="steelblue",edgecolor='black',alpha =0.4, label="Setosa") plt.hist(Vc.sepal_length,bins=15,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.sepal_length,bins=15,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.title("sepal length distribution"), plt.xlabel('cm') plt.legend() plt.subplot(2, 2, 2) plt.hist(Se.sepal_width,bins=15,color="steelblue",edgecolor='black',alpha =0.4, label="Setosa") plt.hist(Vc.sepal_width,bins=15,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.sepal_width,bins=15,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.title("sepal width distribution"), plt.xlabel('cm') plt.legend() plt.subplot(2, 2, 3) plt.hist(Se.petal_length,bins=10,color="steelblue",edgecolor='black',alpha =0.4, label="Setosa") plt.hist(Vc.petal_length,bins=10,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.petal_length,bins=10,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.title("petal length distribution"), plt.xlabel('cm') plt.legend() plt.subplot(2, 2, 4) plt.hist(Se.petal_width,bins=10,color="steelblue",edgecolor='black',alpha =0.4, label="Setosa") plt.hist(Vc.petal_width,bins=10,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.petal_width,bins=10,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.title("petal width distribution"), plt.xlabel('cm') plt.legend()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Now, we will visualize multiple features with scatter plots to gain some more insights.
plt.figure(figsize=(15,15)) area = np.pi*20 plt.subplot(2, 2, 1) plt.scatter(Se.sepal_length,Se.sepal_width, s=area, c="steelblue", alpha=0.6, label="Setosa") plt.scatter(Vc.sepal_length,Vc.sepal_width, s=area, c="red", alpha=0.6, label="Versicolor") plt.scatter(Vi.sepal_length,Vi.sepal_width, s=area, c="blue", alpha=0.5, label="Virginica") plt.title("sepal length Vs sepal width"), plt.xlabel('cm'), plt.ylabel('cm') plt.legend() plt.subplot(2, 2, 2) plt.scatter(Se.petal_length,Se.petal_width, s=area, c="steelblue", alpha=0.6, label="Setosa") plt.scatter(Vc.petal_length,Vc.petal_width, s=area, c="red", alpha=0.6, label="Versicolor") plt.scatter(Vi.petal_length,Vi.petal_width, s=area, c="blue", alpha=0.5, label="Virginica") plt.title("petal length Vs petal width"), plt.xlabel('cm'), plt.ylabel('cm') plt.legend() plt.subplot(2, 2, 3) plt.scatter(Se.sepal_length,Se.petal_length, s=area, c="steelblue", alpha=0.6, label="Setosa") plt.scatter(Vc.sepal_length,Vc.petal_length, s=area, c="red", alpha=0.6, label="Versicolor") plt.scatter(Vi.sepal_length,Vi.petal_length, s=area, c="blue", alpha=0.5, label="Virginica") plt.title("sepal length Vs petal length"), plt.xlabel('cm'), plt.ylabel('cm') plt.legend() plt.subplot(2, 2, 4) plt.scatter(Se.sepal_width,Se.petal_width, s=area, c="steelblue", alpha=0.6, label="Setosa") plt.scatter(Vc.sepal_width,Vc.petal_width, s=area, c="red", alpha=0.6, label="Versicolor") plt.scatter(Vi.sepal_width,Vi.petal_width, s=area, c="blue", alpha=0.5, label="Virginica") plt.title("sepal width Vs petal width"), plt.xlabel('cm'), plt.ylabel('cm') plt.legend()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
We can definitely see some blobs forming from these visualizations. The "Setosa" class usually stands out from the other two classes, but the sepal width vs sepal length plot shows that the "versicolor" and "virginica" classes will be more challenging to classify compared to the "setosa" class.

Training the model
[Scikit-learn](https://scikit-learn.org/stable/) is a free machine learning library for Python which features various classification, regression and clustering algorithms. [Seaborn](https://seaborn.pydata.org/) is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn import metrics import seaborn as sns df = pd.read_csv('iris_data.csv') # df.dtypes df.tail() train_X, test_X, train_y, test_y = train_test_split(df[df.columns[0:4]].values, df.variety.values, test_size=0.25) modelDT = DecisionTreeClassifier().fit(train_X, train_y) DT_predicted = modelDT.predict(test_X) modelRF = RandomForestClassifier().fit(train_X, train_y) RF_predicted = modelRF.predict(test_X)
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Model Evaluation Decision Tree classifier
print(metrics.classification_report(DT_predicted, test_y)) mat = metrics.confusion_matrix(test_y, DT_predicted) sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False) plt.xlabel('true label') plt.ylabel('predicted label');
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Random Forest Classifier
print(metrics.classification_report(RF_predicted, test_y)) from sklearn.metrics import confusion_matrix import seaborn as sns mat = confusion_matrix(test_y, RF_predicted) sns.heatmap(mat.T, square=True, annot=True,fmt='d', cbar=False) plt.xlabel('true label') plt.ylabel('predicted label');
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
[colab notebook](https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.08-Random-Forests.ipynb) Feature Engineering When generating new features, the product between two features is usually not recommended to engineer unless it makes a magnification of the situation. Here, we use two new features, petal hypotenuse and petal product.
#Generate new features df = pd.read_csv('iris_data.csv') df['petal_hypotenuse'] = np.sqrt(df["petal_length"]**2+df["petal_width"]**2) df['petal_product']=df["petal_length"]*df["petal_width"] df.tail() Se= df.loc[df.variety =='Setosa', :] Vc= df.loc[df.variety =='Versicolor', :] Vi= df.loc[df.variety =='Virginica', :] plt.figure(figsize=(16,8)) plt.subplot(1, 2, 1) plt.hist(Se.petal_hypotenuse,bins=10,color="steelblue",edgecolor='black',alpha =0.4 , label="Setosa") plt.hist(Vc.petal_hypotenuse,bins=10,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.petal_hypotenuse,bins=10,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.legend() plt.title("petal hypotenuse distribution"), plt.xlabel('cm') plt.subplot(1, 2, 2) plt.hist(Se.petal_product,bins=10,color="steelblue",edgecolor='black',alpha =0.4, label="Setosa") plt.hist(Vc.petal_product,bins=10,color='red',edgecolor='black', alpha =0.3, label="Versicolor") plt.hist(Vi.petal_product,bins=10,color='blue',edgecolor='black', alpha =0.3, label="Virginica") plt.legend() plt.title("petal product distribution"), plt.xlabel('cm') plt.figure(figsize=(10,10)) area = np.pi*20 plt.scatter(Se.petal_hypotenuse,Se.petal_product, s=area, c="steelblue", alpha=0.6, label="Setosa") plt.scatter(Vc.petal_hypotenuse,Vc.petal_product, s=area, c="red", alpha=0.6, label="Versicolor") plt.scatter(Vi.petal_hypotenuse,Vi.petal_product, s=area, c="blue", alpha=0.5, label="Virginica") plt.title("petal hypotenuse Vs petal product"), plt.xlabel('cm'), plt.ylabel('cm^2') plt.legend()
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Train with Engineered features Now, let's replace two petal features with two new features we generated.
df.head() df2 = df.loc[:,["sepal_length","sepal_width","petal_hypotenuse","petal_product","variety"]] df2.dtypes train_X, test_X, train_y, test_y = train_test_split(df2[df2.columns[0:4]].values, df2.variety.values, test_size=0.25) from sklearn.tree import DecisionTreeClassifier modelDT = DecisionTreeClassifier().fit(train_X, train_y) DT_predicted = modelDT.predict(test_X) from sklearn.ensemble import RandomForestClassifier modelRF = RandomForestClassifier().fit(train_X, train_y) RF_predicted = modelRF.predict(test_X) print(metrics.classification_report(DT_predicted, test_y)) # print(metrics.classification_report(RF_predicted, test_y)) from sklearn.metrics import confusion_matrix import seaborn as sns mat = confusion_matrix(test_y, DT_predicted) # mat = confusion_matrix(test_y, RF_predicted) sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False) plt.xlabel('true label') plt.ylabel('predicted label');
_____no_output_____
Apache-2.0
_notebooks/2020-12-23-week3-day1.ipynb
ytu-cvlab/Resource-Blog
Data Preprocessing
This notebook shows the preprocessing steps applied to the movie dataset.
import pandas as pd import numpy as np import matplotlib.pyplot as plt import re import json import nltk from nltk.corpus import wordnet import sklearn import seaborn as sns import unicodedata import inflect nltk.download('punkt') nltk.download('wordnet') nltk.download('averaged_perceptron_tagger') nltk.download('stopwords')
[nltk_data] Downloading package punkt to /home/jovyan/nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package wordnet to /home/jovyan/nltk_data... [nltk_data] Package wordnet is already up-to-date! [nltk_data] Downloading package averaged_perceptron_tagger to [nltk_data] /home/jovyan/nltk_data... [nltk_data] Package averaged_perceptron_tagger is already up-to- [nltk_data] date! [nltk_data] Downloading package stopwords to /home/jovyan/nltk_data... [nltk_data] Package stopwords is already up-to-date!
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Load the data:
path = '../data/movies_metadata.csv' df = pd.read_csv(path) df = pd.concat([df['release_date'], df['title'], df['overview'], df['genres']], axis=1) # remove duplicates duplicate_rows = df[df.duplicated()] df.drop(duplicate_rows.index, inplace=True)
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Drop the rows where either title or overview is NaN
# convert empty string to NaN df['overview'].replace('', np.nan, inplace=True) df.dropna(subset=['release_date', 'title', 'overview'], inplace=True) # the release date is no longer necessary, because NaN are cleared del df['release_date']
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Drop rows with no overview info or blank
reg_404 = "^not available|^no overview" overview_not_found = df['overview'].str.contains(reg_404, regex=True, flags=re.IGNORECASE) overview_blank = df['overview'].str.isspace() df.drop(df[overview_not_found].index, inplace=True) df.drop(df[overview_blank].index, inplace=True) df.head()
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Transform column genre
def extract_genres(genres_str): genres_str = genres_str.replace("'", '\"') genres_json = json.loads(genres_str) genres_list = [] for elem in genres_json: genres_list.append(elem['name']) return genres_list # remove rows with no genres, since they don't provide any information df.drop(df[df['genres'] == '[]'].index, inplace=True) # transform genres from string to list temp_genre = df['genres'].apply(extract_genres) # test conversion to list went ok g_set = set() for i, row in df['genres'].iteritems(): reg = '' for genre in temp_genre[i]: reg = reg + '(?=.*' + genre + ')' g_set.add(genre) if not re.search(reg, row) or len(temp_genre[i]) == 0: print('FAILED: at i =', i , row) print(reg) break df['genres'] = temp_genre
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Visualise movie genres' distribution
all_genres = sum(df['genres'], []) genre_types = set(all_genres) len(genre_types) all_genres = nltk.FreqDist(all_genres) # create dataframe all_genres_df = pd.DataFrame({'Genre': list(all_genres.keys()), 'Count': list(all_genres.values())}) g = all_genres_df.nlargest(columns="Count", n = 50) plt.figure(figsize=(12,15)) ax = sns.barplot(data=g, x= "Count", y = "Genre") plt.show()
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Text Preprocessing
def to_lower(text): return text.lower() def remove_specials(sentence): sentence = sentence.replace('-', ' ') sentence = re.sub(r'[^\w\s]', '', sentence) return sentence def remove_stopwords(tokens): words = [] for word in tokens: if word not in nltk.corpus.stopwords.words('english'): words.append(word) return words def replace_nums2words(tokens): e = inflect.engine() words = [] for word in tokens: if word.isdigit(): words.append(e.number_to_words(word).replace(',', '')) else: words.append(word) return words def lemmatisation(tokens): pos_tag = nltk.pos_tag(tokens) lemmatiser = nltk.WordNetLemmatizer() wornet_tags = {"J": wordnet.ADJ, "N": wordnet.NOUN, "V": wordnet.VERB, "R": wordnet.ADV} words = [] for word, tag in pos_tag: proper_tag = wornet_tags.get(tag[0].upper(), wordnet.NOUN) words.append(lemmatiser.lemmatize(word, proper_tag)) return words def text_preprocessing(text): # 1. Transform all characters in lowercase text = to_lower(text) # 2. Replace all compatibility characters with their equivalents (i.e. accented) text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8') # 3. Remove special characters (punctuation, extra spaces) text = remove_specials(text) # 4. Tokenization toks = nltk.word_tokenize(text) # 5. Stopwords removal toks = remove_stopwords(toks) # 5. Convert to number to text representation toks = replace_nums2words(toks) # 6. Lemmatisation # toks = lemmatisation(toks) return toks df['overview'] = df['overview'].apply(text_preprocessing) def flatten_overview_words(column): all_words = [] for overview in column.values.tolist(): for word in overview: all_words.append(word) return all_words def freq_words(x, terms = 30): fdist = nltk.FreqDist(x) words_df = pd.DataFrame({'word':list(fdist.keys()), 'count':list(fdist.values())}) # selecting top 20 most frequent words d = words_df.nlargest(columns="count", n = terms) # visualize words and frequencies plt.figure(figsize=(12,15)) ax = sns.barplot(data=d, x= "count", y = "word") ax.set(ylabel = 'Word') plt.show() # print 100 most frequent words freq_words(flatten_overview_words(df['overview']), 50) new_df = df['title'] str_overview = df['overview'].apply(lambda x: ' '.join(x)) new_df = pd.concat([new_df, str_overview], axis=1) new_df = pd.concat([new_df, df['genres']], axis=1) new_df['genres'] = new_df['genres'].apply(lambda x: ','.join(x)) new_df['overview'] = new_df['title'].apply(to_lower).astype(str) + ' ' + new_df ['overview'] new_df
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Save the processed data:
# new_df.to_csv("../data/movies_data_ready.csv", index=False)
_____no_output_____
MIT
notebooks/data_preprocessing.ipynb
SudoHead/movie-classifier
Model Training
for a in alpha: for i in range(5000): for j in range(x.size): h[j]=theta[0]*1 + theta[1]*x[j] + theta[2]*np.power(x[j],2) + theta[3]*np.power(x[j],3) theta=theta+a*(y[j]-h[j])*np.array([1, x[j], np.power(x[j],2), np.power(x[j],3)]) if a==0.03: error[i]=np.sum(np.square(h-y))/2 err=np.sum(np.square(h-y)) print('Error with learning rate {} is {} '.format(a, err)) if err<min_err: min_err=err best_h=h best_theta=theta best_alpha=a print('min_err: ',min_err) print('best_theta: ',best_theta) print('best_alpha: ',best_alpha) error f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5)) ax1.plot(x, y, 'o', label='Actual datapoints') ax1.plot(x, h_init, 'go', label='Initial model prediction') ax1.plot(x, best_h, 'ro', label='Final model prediction') ax1.legend() ax2.plot(error) ax2.set_title('Error curve') plt.show()
_____no_output_____
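The training cell above relies on variables (`x`, `y`, `theta`, `h`, `h_init`, `alpha`, `error`, `min_err`) that were defined in earlier cells not shown here. A minimal sketch of the setup it assumes, with hypothetical example data:

```python
import numpy as np

# hypothetical training data: noisy samples of a cubic function
x = np.linspace(-1, 1, 50)
y = 0.5 + 1.5*x - 2.0*x**2 + 0.8*x**3 + 0.05*np.random.randn(x.size)

theta = np.zeros(4)            # parameters of the cubic model
h = np.zeros(x.size)           # model predictions, filled in during training
h_init = theta[0] + theta[1]*x + theta[2]*x**2 + theta[3]*x**3  # initial prediction
error = np.zeros(5000)         # per-iteration error recorded for the plotted curve
alpha = [0.01, 0.03, 0.1]      # candidate learning rates (0.03 is the one whose error is stored)
min_err = np.inf
best_h, best_theta, best_alpha = h, theta, alpha[0]
```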
MIT
Results/Jupyter/ML/ex1.ipynb
in2dblue/interactive-rl
Dataset Bug Detection
In this example, we will demonstrate how to detect bugs in a data set using the public Airlines data set.
# Since we use the category_encoders library to perform binary encoding on some of the features in this demo, # we'll need to install it. !pip install category_encoders import pandas pandas.options.display.max_rows=5 # restrict to 5 rows on display df = pandas.read_csv("https://raw.githubusercontent.com/Devvrat53/Flight-Delay-Prediction/master/Data/flight_data.csv") df['date'] = pandas.to_datetime(df[['year', 'month', 'day']]) df['day_index'] = (df['date'] - df['date'].min()).dt.days df['DayOfWeek'] = df['date'].dt.day_name() df['Month'] = df['date'].dt.month_name() df
_____no_output_____
Apache-2.0
docs/notebooks/divergence/BugDetection.ipynb
FINRAOS/model-validation-toolkit
Prepare daily data
Let's assume that we run new data each day through our model. For simplicity we will just look at the last 10 days of data.
df_daily = df[df['month'] > 11] df_daily = df_daily[df_daily['day'] > 20] df_daily
_____no_output_____
Apache-2.0
docs/notebooks/divergence/BugDetection.ipynb
FINRAOS/model-validation-toolkit
Bug Detection
Now we want to find any bugs in any of our daily sets of data that we feed to our model. Note that we are performing binary encoding on the categorical columns (carrier, origin, and dest) so that we can pass the data to the variational estimation function directly. We are doing this for performance reasons vs. the hybrid estimation, and to strike a balance between plain index encoding and one-hot encoding.
import category_encoders as ce from mvtk.supervisor.utils import compute_divergence_crosstabs from mvtk.supervisor.divergence import calc_tv_knn columns = ['dep_time', 'sched_dep_time', 'dep_delay', 'arr_time', 'sched_arr_time', 'arr_delay', 'air_time', 'distance', 'hour', 'minute', 'carrier', 'origin', 'dest'] encoder = ce.BinaryEncoder(cols=['carrier', 'origin', 'dest']) encoder.fit(df_daily[columns + ['day']]) df_daily_encoded = encoder.transform(df_daily[columns + ['day']].fillna(0)) f = lambda x, y: calc_tv_knn(x, y, k = 26) result = compute_divergence_crosstabs(df_daily_encoded, datecol='day', divergence=f) import matplotlib.pyplot as plt import seaborn as sns sns.heatmap(result, cmap='coolwarm', linewidths=0.30, annot=True) plt.show()
_____no_output_____
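To see why binary encoding sits between plain index encoding and one-hot encoding, here is a small sketch on a toy column (not the flight data; exact column counts may vary slightly with the category_encoders version):

```python
import pandas as pd
import category_encoders as ce

toy = pd.DataFrame({'carrier': ['AA', 'UA', 'DL', 'B6', 'WN', 'AA']})

onehot_cols = pd.get_dummies(toy['carrier']).shape[1]                          # one column per category
binary_cols = ce.BinaryEncoder(cols=['carrier']).fit_transform(toy).shape[1]   # roughly log2(n_categories) columns
print(onehot_cols, binary_cols)
```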
Apache-2.0
docs/notebooks/divergence/BugDetection.ipynb
FINRAOS/model-validation-toolkit
As you can see from the heatmap above, although there are some divergences between the days, there is nothing that is too alarming.Let's now update our data set to contain a "bug" in the "sched_dep_time" feature. For day 30, all of the values of that feature are null (which we are then translating to 0).
df_daily.loc[df_daily['day'] == 30, ['sched_dep_time']] = None
_____no_output_____
Apache-2.0
docs/notebooks/divergence/BugDetection.ipynb
FINRAOS/model-validation-toolkit
Below is the percentage of scheduled departure times that are empty per day in our updated daily data set
day = 21 for df_day in df_daily.groupby('day'): day_pct = df_day[1]['sched_dep_time'].value_counts(normalize=True, dropna=False) * 100 pct = day_pct.loc[day_pct.index.isnull()].values if (len(pct) == 0): pct = 0 else: pct = pct[0] print('Day ' + str(day) + ': ' + str(round(pct)) + '%') day += 1 from mvtk.supervisor.divergence import calc_tv_knn encoder = ce.BinaryEncoder(cols=['carrier', 'origin', 'dest']) encoder.fit(df_daily[columns + ['day']]) df_daily_encoded = encoder.transform(df_daily[columns + ['day']].fillna(0)) f = lambda x, y: calc_tv_knn(x, y, k = 26) result = compute_divergence_crosstabs(df_daily_encoded, datecol='day', divergence=f) import matplotlib.pyplot as plt import seaborn as sns sns.heatmap(result, cmap='coolwarm', linewidths=0.30, annot=True) plt.show()
_____no_output_____
Apache-2.0
docs/notebooks/divergence/BugDetection.ipynb
FINRAOS/model-validation-toolkit
Think Bayes
This notebook presents example code and exercise solutions for Think Bayes.
Copyright 2018 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import classes from thinkbayes2 from thinkbayes2 import Hist, Pmf, Suite, Beta import thinkplot import numpy as np
_____no_output_____
MIT
solutions/world_cup_soln.ipynb
chwebster/ThinkBayes2
Control of a robotic arm with a flexible joint
A link of a robotic arm is driven by an electric motor through a flexible joint that behaves like a torsional spring. The dynamics of the system can be approximated by a third-order linear time-invariant system whose states are:
- $x_1$: difference between the motor angle and the link angle (it is not zero because of the joint flexibility),
- $x_2$: angular velocity of the motor shaft,
- $x_3$: angular velocity of the link.
The input $u$ is the motor torque. The dynamic equations are:
\begin{cases}\dot{x} = \begin{bmatrix} 0 & 1 & -1 \\ a-1 & -b_1 & b_1 \\ a & b_2 & -b_2 \end{bmatrix}x + \begin{bmatrix} 0 \\ b_3 \\ 0 \end{bmatrix}u \\y = \begin{bmatrix} 0 & 0 & 1 \end{bmatrix}x\end{cases}
with $a=0.1$, $b_1=0.09$, $b_2=0.01$ and $b_3=90$.
The goal of the control system design is to regulate the angular velocity of the link so as to obtain dominant poles with a damping ratio of 0.7 and a natural frequency of 0.5 rad/s, and zero steady-state error in response to a step in the reference velocity.
The transfer function of the system is:
A = numpy.matrix('0 1 -1; -0.9 -0.09 0.09; 0.1 0.01 -0.01') B = numpy.matrix('0; 90; 0') C = numpy.matrix('0 0 1') D = numpy.matrix('0') sys_tf = control.tf(sss(A,B,C,D)) print(sys_tf)
3.886e-16 s^2 + 0.9 s + 9 ----------------------------- s^3 + 0.1 s^2 + s - 8.674e-19
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
with poles
import warnings # In order to suppress the warning BadCoefficient warnings.filterwarnings("ignore") print(numpy.round(sys_tf.pole(),3))
[-0.05+0.999j -0.05-0.999j 0. +0.j ]
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
and zeros
print(numpy.round(sys_tf.zero(),3),'.')
[-2.31613695e+15 -1.00000000e+01] .
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
First, we analyze the system to check whether it is controllable and observable. The controllability matrix $\mathcal{C}$ is
Ctrb = control.ctrb(A,B) display(Markdown(bmatrix(Ctrb))) # print(numpy.linalg.matrix_rank(Ctrb))
_____no_output_____
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
and has rank 3, so the system is controllable. The observability matrix $\mathcal{O}$ is
Obsv = control.obsv(A,C) display(Markdown(bmatrix(Obsv))) # print(numpy.linalg.matrix_rank(Obsv))
_____no_output_____
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
and has rank 3, so the system is observable. This could in fact be deduced from the fact that the denominator of the transfer function is of third order (equal to the dimension of the state vector).

Regulator design
Controller design
Given the requirements, we know that two poles must be placed at $-\zeta \omega_n \pm i\,\omega_n\sqrt{1-\zeta^2} = -0.35\pm0.357i$, and that the remaining real pole must be placed at a frequency higher than that of the (dominant) complex poles. We can choose to place the third pole at -3.5 rad/s. To meet the zero steady-state error requirement, the reference signal is scaled by a gain equal to the inverse of the closed-loop system gain.

Observer design
To have an observer that quickly assists the controller, we simply place its poles at about -10 rad/s.

How to use this notebook?
- Check whether the closed-loop system still works well when there is an estimation error in the initial state. Try to improve the performance.
- Reduce the frequency of the real pole of the controlled closed-loop system and observe how the response differs from the reference.
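A minimal sketch of the pole-placement step described above, reusing the `A`, `B`, `C` matrices and the `control` package from the cells above (the full interactive implementation follows in the next cells):

```python
# desired closed-loop poles: dominant pair at -0.35 +/- 0.357j plus a faster real pole at -3.5 rad/s
desired_poles = [-3.5, complex(-0.35, 0.357), complex(-0.35, -0.357)]
K = control.acker(A, B, desired_poles)

# observer poles about ten times faster (the notebook's default places all three at -10 rad/s)
L_obs = control.acker(A.T, C.T, [-10.0, -10.0, -10.0]).T
```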
# Preparatory cell X0 = numpy.matrix('0.0; 0.0; 0.0') K = numpy.matrix([8/15,-4.4,-4]) L = numpy.matrix([[23],[66],[107/3]]) Aw = matrixWidget(3,3) Aw.setM(A) Bw = matrixWidget(3,1) Bw.setM(B) Cw = matrixWidget(1,3) Cw.setM(C) X0w = matrixWidget(3,1) X0w.setM(X0) Kw = matrixWidget(1,3) Kw.setM(K) Lw = matrixWidget(3,1) Lw.setM(L) eig1c = matrixWidget(1,1) eig2c = matrixWidget(2,1) eig3c = matrixWidget(1,1) eig1c.setM(numpy.matrix([-3.5])) eig2c.setM(numpy.matrix([[-0.35],[-0.357]])) eig3c.setM(numpy.matrix([-3.5])) eig1o = matrixWidget(1,1) eig2o = matrixWidget(2,1) eig3o = matrixWidget(1,1) eig1o.setM(numpy.matrix([-10.])) eig2o.setM(numpy.matrix([[-10.],[0.]])) eig3o.setM(numpy.matrix([-10.])) # Misc #create dummy widget DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px')) #create button widget START = widgets.Button( description='Test', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Test', icon='check' ) def on_start_button_clicked(b): #This is a workaround to have intreactive_output call the callback: # force the value of the dummy widget to change if DW.value> 0 : DW.value = -1 else: DW.value = 1 pass START.on_click(on_start_button_clicked) # Define type of method selm = widgets.Dropdown( options= [('Imposta K e L','Set K and L'), ('Imposta gli autovalori','Set the eigenvalues')], value= 'Set the eigenvalues', description='', disabled=False ) # Define the number of complex eigenvalues sele = widgets.Dropdown( options= [('0 autovalori complessi','0 complex eigenvalues'), ('2 autovalori complessi','2 complex eigenvalues')], value= '2 complex eigenvalues', description='Autovalori complessi:', style = {'description_width': 'initial'}, disabled=False ) #define type of ipout selu = widgets.Dropdown( options=[('impulso','impulse'), ('gradino','step'), ('sinusoide','sinusoid'), ('onda quadra','square wave')], value='step', description='Riferimento:', style = {'description_width': 'initial'}, disabled=False ) # Define the values of the input u = widgets.FloatSlider( value=1, min=0, max=3, step=0.1, description='Riferimento:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) period = widgets.FloatSlider( value=0.5, min=0.001, max=10, step=0.001, description='Periodo: ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.2f', ) gain_w2 = widgets.FloatText( value=1., # description='', description='Guadagno inverso del riferimento:', style = {'description_width': 'initial'}, disabled=True ) simTime = widgets.FloatText( value=20, description='Tempo di simulazione (s):', style = {'description_width': 'initial'}, disabled=False ) # Support functions def eigen_choice(sele): if sele == '0 complex eigenvalues': eig1c.children[0].children[0].disabled = False eig2c.children[1].children[0].disabled = True eig1o.children[0].children[0].disabled = False eig2o.children[1].children[0].disabled = True eig = 0 if sele == '2 complex eigenvalues': eig1c.children[0].children[0].disabled = True eig2c.children[1].children[0].disabled = False eig1o.children[0].children[0].disabled = True eig2o.children[1].children[0].disabled = False eig = 2 return eig def method_choice(selm): if selm == 'Set K and L': method = 1 sele.disabled = True if selm == 'Set the eigenvalues': method = 2 sele.disabled = False return method s = control.tf('s') Gref = (0.5)**2/(s**2 + 2*0.7*0.5*s + (0.5)**2) def main_callback2(Aw, Bw, X0w, K, L, eig1c, eig2c, eig3c, eig1o, eig2o, 
eig3o, u, period, selm, sele, selu, simTime, DW): eige = eigen_choice(sele) method = method_choice(selm) if method == 1: solc = numpy.linalg.eig(A-B*K) solo = numpy.linalg.eig(A-L*C) if method == 2: if eige == 0: K = control.acker(A, B, [eig1c[0,0], eig2c[0,0], eig3c[0,0]]) Kw.setM(K) L = control.acker(A.T, C.T, [eig1o[0,0], eig2o[0,0], eig3o[0,0]]).T Lw.setM(L) if eige == 2: K = control.acker(A, B, [eig3c[0,0], numpy.complex(eig2c[0,0],eig2c[1,0]), numpy.complex(eig2c[0,0],-eig2c[1,0])]) Kw.setM(K) L = control.acker(A.T, C.T, [eig3o[0,0], numpy.complex(eig2o[0,0],eig2o[1,0]), numpy.complex(eig2o[0,0],-eig2o[1,0])]).T Lw.setM(L) sys = control.ss(A,B,numpy.vstack((C,numpy.zeros((B.shape[1],C.shape[1])))),numpy.vstack((D,numpy.eye(B.shape[1])))) sysC = control.ss(numpy.zeros((1,1)), numpy.zeros((1,numpy.shape(A)[0])), numpy.zeros((numpy.shape(B)[1],1)), -K) sysE = control.ss(A-L*C, numpy.hstack((L,B-L*D)), numpy.eye(numpy.shape(A)[0]), numpy.zeros((A.shape[0],C.shape[0]+B.shape[1]))) sys_append = control.append(sys, sysE, sysC, control.ss(A,B,numpy.eye(A.shape[0]),numpy.zeros((A.shape[0],B.shape[1])))) Q = [] # y in ingresso a sysE for i in range(C.shape[0]): Q.append([B.shape[1]+i+1, i+1]) # u in ingresso a sysE for i in range(B.shape[1]): Q.append([B.shape[1]+C.shape[0]+i+1, C.shape[0]+i+1]) # u in ingresso a sys for i in range(B.shape[1]): Q.append([i+1, C.shape[0]+B.shape[1]+A.shape[0]+i+1]) # u in ingresso al sistema che ha come uscite gli stati reali for i in range(B.shape[1]): Q.append([2*B.shape[1]+C.shape[0]+A.shape[0]+i+1, C.shape[0]+i+1]) # xe in ingresso a sysC for i in range(A.shape[0]): Q.append([2*B.shape[1]+C.shape[0]+i+1, C.shape[0]+B.shape[1]+i+1]) inputv = [i+1 for i in range(B.shape[1])] outputv = [i+1 for i in range(numpy.shape(sys_append.C)[0])] sys_CL = control.connect(sys_append, Q, inputv, outputv) t = numpy.linspace(0, 100000, 2) t, yout = control.step_response(sys_CL[0,0],T=t) dcgain = yout[-1] gain_w2.value = dcgain if dcgain != 0: u1 = u/gain_w2.value else: print('Il guadagno impostato per il riferimento è 0 e quindi viene cambiato a 1') u1 = u/1 print('Il guadagno statico del sistema in anello chiuso (dal riferimento all\'uscita) è: %.5f' %dcgain) X0w1 = numpy.zeros((A.shape[0],1)) for j in range(A.shape[0]): X0w1 = numpy.vstack((X0w1,X0w[j])) X0w1 = numpy.vstack((X0w1,numpy.zeros((A.shape[0],1)))) if simTime != 0: T = numpy.linspace(0, simTime, 10000) else: T = numpy.linspace(0, 1, 10000) if selu == 'impulse': #selu U = [0 for t in range(0,len(T))] U[0] = u U1 = [0 for t in range(0,len(T))] U1[0] = u1 T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1) T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0]) if selu == 'step': U = [u for t in range(0,len(T))] U1 = [u1 for t in range(0,len(T))] T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1) T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0]) if selu == 'sinusoid': U = u*numpy.sin(2*numpy.pi/period*T) U1 = u1*numpy.sin(2*numpy.pi/period*T) T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1) T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0]) if selu == 'square wave': U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T)) U1 = u1*numpy.sign(numpy.sin(2*numpy.pi/period*T)) T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1) T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0]) # N.B. 
i primi 3 stati di xout sono quelli del sistema, mentre gli ultimi 3 sono quelli dell'osservatore step_info_dict = control.step_info(sys_CL[0,0],SettlingTimeThreshold=0.05,T=T) print('Step info: \n\tTempo di salita =',step_info_dict['RiseTime'],'\n\tTempo di assestamento (5%) =',step_info_dict['SettlingTime'],'\n\tOvershoot (%)=',step_info_dict['Overshoot']) # print('Max x3 value (%)=', max(abs(yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]))/(numpy.pi/180*17)*100) fig = plt.figure(num='Simulation1', figsize=(14,12)) fig.add_subplot(221) plt.title('Risposta dell\'uscita') plt.ylabel('Uscita') plt.plot(T,yout[0],T,yout_ref,T,U,'r--') plt.xlabel('$t$ [s]') plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.legend(['$y$','Sistema del secondo ordine di riferimento','Riferimento']) plt.grid() fig.add_subplot(222) plt.title('Ingresso') plt.ylabel('$u$') plt.plot(T,yout[C.shape[0]]) plt.xlabel('$t$ [s]') plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.grid() fig.add_subplot(223) plt.title('Risposta degli stati') plt.ylabel('Stati') plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]], T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1], T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]) plt.xlabel('$t$ [s]') plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.legend(['$x_{1}$','$x_{2}$','$x_{3}$']) plt.grid() fig.add_subplot(224) plt.title('Errori di stima') plt.ylabel('Errori') plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]]-yout[C.shape[0]+B.shape[1]], T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1]-yout[C.shape[0]+B.shape[1]+1], T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]-yout[C.shape[0]+B.shape[1]+2]) plt.xlabel('$t$ [s]') plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.legend(['$e_{1}$','$e_{2}$','$e_{3}$']) plt.grid() #plt.tight_layout() alltogether2 = widgets.VBox([widgets.HBox([selm, sele, selu]), widgets.Label(' ',border=3), widgets.HBox([widgets.Label('K:',border=3), Kw, widgets.Label(' ',border=3), widgets.Label(' ',border=3), widgets.Label('Autovalori:',border=3), eig1c, eig2c, eig3c, widgets.Label(' ',border=3), widgets.Label(' ',border=3), widgets.Label('X0 stim.:',border=3), X0w]), widgets.Label(' ',border=3), widgets.HBox([widgets.Label('L:',border=3), Lw, widgets.Label(' ',border=3), widgets.Label(' ',border=3), widgets.Label('Autovalori:',border=3), eig1o, eig2o, eig3o, widgets.Label(' ',border=3), # widgets.VBox([widgets.Label('Inverse reference gain:',border=3), # widgets.Label('Simulation time [s]:',border=3)]), widgets.VBox([gain_w2,simTime])]), widgets.Label(' ',border=3), widgets.HBox([u, period, START])]) out2 = widgets.interactive_output(main_callback2, {'Aw':Aw, 'Bw':Bw, 'X0w':X0w, 'K':Kw, 'L':Lw, 'eig1c':eig1c, 'eig2c':eig2c, 'eig3c':eig3c, 'eig1o':eig1o, 'eig2o':eig2o, 'eig3o':eig3o, 'u':u, 'period':period, 'selm':selm, 'sele':sele, 'selu':selu, 'simTime':simTime, 'DW':DW}) out2.layout.height = '860px' display(out2, alltogether2)
_____no_output_____
BSD-3-Clause
ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb
ICCTerasmus/ICCT
Data Science and Business Analytics Intern @ The Sparks Foundation Author : Aniket M. Wazarkar Task 2 : Prediction using Unsupervised ML Dataset : Iris.csv (https://bit.ly/3kXTdox) Algorithm used here : K-Means Clustering Import Libraries
%matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.decomposition import PCA
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Load Dataset
df=pd.read_csv('Iris.csv') df
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
K-Means is considered an unsupervised learning algorithm, which means it only needs a features matrix (no target labels). The iris dataset has four features, but in this notebook the features matrix will contain only two of them, because clusters are easier to visualize in two dimensions. KMeans is a popular clustering algorithm that we can use to find structure in our data.
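As a quick illustration of that point before working with the iris data, here is a tiny sketch (the toy array values below are made up for demonstration) showing that KMeans is fit on a bare features matrix with no labels at all:

import numpy as np
from sklearn.cluster import KMeans

# Toy sketch: KMeans only ever sees a features matrix; no target labels are passed.
toy = np.array([[1.0, 1.1], [0.9, 1.0], [8.0, 8.2], [8.1, 7.9]])
toy_km = KMeans(n_clusters=2, random_state=0).fit(toy)
print(toy_km.labels_)           # cluster index assigned to each row
print(toy_km.cluster_centers_)  # one centroid per cluster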
df.info() df.Species.unique() df["Species"].value_counts()
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Arrange Data into Feature Matrix Use the DataFrame.loc attribute to select rows and columns of the given DataFrame by their index and column labels.
features = ['PetalLengthCm','PetalWidthCm'] # Create features matrix x = df.loc[:, features].values x
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
sklearn.preprocessing.LabelEncoder encodes target labels with values between 0 and n_classes-1.
from sklearn import preprocessing le=preprocessing.LabelEncoder() df.Species=le.fit_transform(df.Species.values) df.Species y=df.Species y
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Standardize the data Standardize features by removing the mean and scaling to unit variance. The standard score of a sample x is calculated as z = (x - u) / s, where u is the mean of the training samples (or zero if with_mean=False) and s is the standard deviation of the training samples (or one if with_std=False).
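As a small sanity check of the formula above, here is a sketch (it assumes the two-column feature matrix `x` built earlier is still in scope) showing that the manual z-score matches what StandardScaler produces:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Verify z = (x - u) / s against StandardScaler (both use the population std).
z_manual = (x - x.mean(axis=0)) / x.std(axis=0)
z_scaler = StandardScaler().fit_transform(x)
print(np.allclose(z_manual, z_scaler))  # expected: True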
x=StandardScaler().fit_transform(x)
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Plot data to estimate number of clusters
X=pd.DataFrame(x,columns=features) plt.figure(figsize=(6,5)) plt.scatter(X['PetalLengthCm'], X['PetalWidthCm']) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)'); plt.title('K-Means Clustering')
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Finding the optimum number of clusters for K-means clustering
# Finding the optimum number of clusters for k-means clustering wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) kmeans.fit(x) wcss.append(kmeans.inertia_) # Plotting the results onto a line graph, # allowing us to observe 'The elbow' plt.plot(range(1, 11), wcss) plt.title('The elbow method') plt.xlabel('Number of clusters') plt.ylabel('WCSS') # Within cluster sum of squares plt.show()
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
This is called 'the elbow method': from the graph above, the optimum number of clusters is where the elbow occurs, that is, the point after which the within-cluster sum of squares (WCSS) stops decreasing significantly with each additional cluster. From this we choose the number of clusters as **'3'** (a short numeric check follows below). K-Means Clustering
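To make "stops decreasing significantly" concrete, this optional check (it assumes the `wcss` list computed in the elbow cell above is still available) prints the percentage drop in WCSS for each additional cluster; the drop should shrink sharply around the chosen k:

# Percentage drop in WCSS when moving from k clusters to k+1 clusters.
for k in range(1, len(wcss)):
    drop = (wcss[k - 1] - wcss[k]) / wcss[k - 1] * 100
    print("k={} -> k={}: WCSS falls by {:.1f}%".format(k, k + 1, drop))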
# Make an instance of KMeans with 3 clusters kmeans = KMeans(n_clusters=3, random_state=1) # Fit only on a features matrix kmeans.fit(x) # Get labels and cluster centroids labels = kmeans.labels_ centroids = kmeans.cluster_centers_ labels centroids
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Visually Evaluate the clusters
colormap = np.array(['r', 'g', 'b']) plt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[labels]) plt.scatter(centroids[:,0], centroids[:,1], s = 300, marker = 'x', c = 'k') plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)');
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Visually Evaluate the clusters and compare the species
plt.figure(figsize=(10,4)) plt.subplot(1, 2, 1) plt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[labels]) plt.scatter(centroids[:,0], centroids[:,1], s = 300, marker = 'x', c = 'k') plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)'); plt.title('K-Means Clustering (k = 3)') plt.subplot(1, 2, 2) plt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[y], s=40) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)'); plt.title('Flower Species') plt.tight_layout()
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
They look pretty similar. It looks like KMeans picked up the flower differences using only two features and no labels. The colors differ between the two graphs simply because KMeans assigns an arbitrary cluster number and the iris dataset has an arbitrary number in the target column. PCA Projection in 2D The full iris data has 4 measurement columns (sepal length, sepal width, petal length, and petal width); note, however, that the feature matrix `x` built earlier in this notebook holds only the two standardized petal columns, so the PCA below operates on those two features (projecting all four measurements to 2D would require rebuilding `x` from all four columns). Note that after dimensionality reduction, there usually isn't a particular meaning assigned to each principal component. The new components are just the two main dimensions of variation
pca = PCA(n_components=2) # Fit and transform the data principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2']) df=pd.read_csv('Iris.csv')
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
2D Projection
finalDf = pd.concat([principalDf, df[['Species']]], axis = 1) finalDf fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (8,8)); targets = df.loc[:, 'Species'].unique() colors = ['r', 'g', 'b'] for target, color in zip(targets,colors): indicesToKeep = finalDf['Species'] == target ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'] , finalDf.loc[indicesToKeep, 'principal component 2'] , c = color , s = 50) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 Component PCA', fontsize = 20) ax.legend(targets) ax.grid()
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
From the graph, it looks like the setosa class is well separated from the versicolor and virginica classes. Explained Variance The explained variance tells us how much information (variance) can be attributed to each of the principal components. This matters because projecting a higher-dimensional space down to 2 dimensions loses some of the variance (information) in the process.
pca.explained_variance_ratio_ sum(pca.explained_variance_ratio_)
_____no_output_____
Apache-2.0
Task #2 Prediction using Unsupervised ML.ipynb
aniketspeaks/Task-2-Prediction-using-Unsupervised-ML
Dynamics 365 Business Central Troubleshooting Guide (TSG) - Performance analysis (overview) This notebook contains Kusto queries that can help you get to the root cause of a performance issue for an environment. Each section in the notebook contains links to the performance tuning guide on docs [aka.ms/bcperformance](aka.ms/bcperformance), links to the documentation of relevant telemetry in [aka.ms/bctelemetry](aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area (sessions, web service requests, database calls, reports, and page load times). NB! Some of the signal used in this notebook is only available in newer versions of Business Central, so check the version of your environment if some sections do not return any data. The signal documentation states in which version a given signal was introduced. 1. Connect to Application Insights First you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it?) and connect to your Application Insights resource (get the appid and appkey from the API access page in the Application Insights portal).
# load the KQLmagic module %reload_ext Kqlmagic # Connect to the Application Insights API %kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
2. Define filters This workbook is designed for troubleshooting a single environment. Please provide values for aadTenantId and environmentName:
aadTenantId = "<Add AAD tenant id here>" environmentName = "<add environment name here>"
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Analyze performance Now you can run Kusto queries to look for possible root causes for performance issues. Either click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run the queries. Sessions Performance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-onlinetelemetry Session telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-authorization-traceauthorization-succeeded-open-company KQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Authorization.kql
%%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0004' and timestamp > ago(7d) | extend clientType = tostring( customDimensions.clientType ) | summarize request_count=count() by clientType, bin(timestamp, 1d) | render timechart title= 'Number of sessions by client type' %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0004' and timestamp > ago(7d) | extend clientType = tostring( customDimensions.clientType ) , executionTimeInSec = toreal(totimespan(customDimensions.serverExecutionTime))/10000000 | summarize _count=count() by executionTimeInSeconds = bin(executionTimeInSec, 1), clientType | extend log_count = log10( _count ) | order by clientType, executionTimeInSeconds asc | render columnchart with (ycolumns = log_count, series = clientType, title= 'Execution time (in seconds) of session login time by client type', ytitle = 'log(count)')
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Web service requests Performance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developerwriting-efficient-web-services Web service telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace KQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql
%%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0008' and timestamp > ago(7d) | extend category = tostring( customDimensions.category ) | summarize request_count=count() by category, bin(timestamp, 1d) | render timechart title= 'Number of web service requests by category' %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0008' and timestamp > ago(7d) | extend category = tostring( customDimensions.category ) , executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 //the datatype for executionTime is timespan | summarize count=count() by executionTime_ms = bin(executionTimeInMS, 100), category | order by category, executionTime_ms asc | render columnchart with (ycolumns = count, series = category, title= 'Execution time (in milliseconds) of web service requests by category' )
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Data related Performance tuning guide: * [Efficient data access](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developerefficient-data-access) * [Avoid locking](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-applicationavoid-locking) Database telemetry docs: * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-long-running-sql-query-trace * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-database-locks-trace KQL samples: * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Long%20Running%20SQL%20Queries.kql * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/LockTimeouts.kql
%%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0005' and timestamp > ago(7d) | summarize count() by bin(timestamp, 1d) | render timechart title= 'Number of long running SQL queries' %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0012' and timestamp > ago(7d) | summarize request_count=count() by bin(timestamp, 1d) | render timechart title= 'Number of database lock timeouts'
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Company management Operations such as "copy company" can cause performance degradations if they are done when users are logged into the system. Read more in the performance tuning guide here: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-applicationbe-cautious-with-the-renamecopy-company-operations Telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-company-lifecycle-trace KQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/CompanyLifecycle.kql
%%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId in ('LC0001') and timestamp > ago(7d) | extend operation_type = case( customDimensions.eventId == 'LC0001', 'Company created', customDimensions.eventId == 'LC0004', 'Company copied', customDimensions.eventId == 'LC0007', 'Company deleted', 'Other' ) | summarize count() by operation_type, bin(timestamp, 1d) | render timechart title= 'Company management operations'
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Reports Learn more about how to write performant reports here in the performance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developerwriting-efficient-reports Report telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-reports-trace KQL samples: * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Reports.kql * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/PerformanceTuning/ReportExecution.kql
%%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0006' and timestamp > ago(7d) | extend clientType = tostring( customDimensions.clientType ) , reportName = tostring( customDimensions.alObjectName ) | where reportName <> '' | summarize count=count() by clientType, bin(timestamp, 1d) | render timechart title= 'Number of reports executed (shown by client/session type)' %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and customDimensions.aadTenantId == _aadTenantId and customDimensions.environmentName == _environmentName and customDimensions.eventId == 'RT0006' and timestamp > ago(7d) | extend reportName = tostring( customDimensions.alObjectName ) , executionTimeInSec = toreal(totimespan(customDimensions.totalTime))/10000000 //the datatype for totalTime is timespan | where reportName <> '' | summarize avg=avg(executionTimeInSec), median=percentile(executionTimeInSec, 50), percentile95=percentile(executionTimeInSec, 95), max=max(executionTimeInSec) by reportName | order by percentile95 | limit 10 | render columnchart with (title= 'Execution time stats of reports by report name (top 10 by 95% percentile)', ytitle='Time (in seconds)' )
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
Page views Page view telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-page-view-trace KQL samples * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/PageViews.kql * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/BrowserUsage.kql
%%kql // Top 10 longest page times // let _aadTenantId = aadTenantId; let _environmentName = environmentName; pageViews | where 1==1 and customDimensions.aadTenantId == _aadTenantId // and customDimensions.environmentName == _environmentName | where timestamp > ago(7d) | extend objectId = tostring(customDimensions.alObjectId) | summarize median_load_time_in_MS = percentile(duration,50) by pageName=name, objectId | order by median_load_time_in_MS desc | limit 10
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb
dmc-dk/BCTech
FloPy Creating a Complex MODFLOW 6 Model with Flopy The purpose of this notebook is to demonstrate the Flopy capabilities for building a more complex MODFLOW 6 model from scratch. It does so by replicating the advgw_tidal model that is distributed with MODFLOW 6. Setup the Notebook Environment
%matplotlib inline import sys import os import platform import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy print(sys.version) print('numpy version: {}'.format(np.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) print('flopy version: {}'.format(flopy.__version__)) # For this example, we will set up a model workspace. # Model input files and output files will reside here. model_name = 'advgw_tidal' workspace = os.path.join('data', model_name) if not os.path.exists(workspace): os.makedirs(workspace) data_pth = os.path.join('..', 'data', 'mf6', 'create_tests', 'test005_advgw_tidal') assert os.path.isdir(data_pth) # create simulation sim = flopy.mf6.MFSimulation(sim_name=model_name, version='mf6', exe_name='mf6', sim_ws=workspace) # create tdis package tdis_rc = [(1.0, 1, 1.0), (10.0, 120, 1.0), (10.0, 120, 1.0), (10.0, 120, 1.0)] tdis = flopy.mf6.ModflowTdis(sim, pname='tdis', time_units='DAYS', nper=4, perioddata=tdis_rc) # create gwf model gwf = flopy.mf6.ModflowGwf(sim, modelname=model_name, model_nam_file='{}.nam'.format(model_name)) gwf.name_file.save_flows = True # create iterative model solution and register the gwf model with it ims = flopy.mf6.ModflowIms(sim, pname='ims', print_option='SUMMARY', complexity='SIMPLE', outer_hclose=0.0001, outer_maximum=500, under_relaxation='NONE', inner_maximum=100, inner_hclose=0.0001, rcloserecord=0.001, linear_acceleration='CG', scaling_method='NONE', reordering_method='NONE', relaxation_factor=0.97) sim.register_ims_package(ims, [gwf.name]) # discretization package nlay = 3 nrow = 15 ncol = 10 botlay2 = {'factor':1.0, 'data': [-100 for x in range(150)]} dis = flopy.mf6.ModflowGwfdis(gwf, pname='dis', nlay=nlay, nrow=nrow, ncol=ncol, delr=500.0, delc=500.0, top=50.0, botm=[5.0, -10.0, botlay2], fname='{}.dis'.format(model_name)) # initial conditions ic = flopy.mf6.ModflowGwfic(gwf, pname='ic', strt=50.0, fname='{}.ic'.format(model_name)) # node property flow npf = flopy.mf6.ModflowGwfnpf(gwf, pname='npf', save_flows=True, icelltype=[1,0,0], k=[5.0, 0.1, 4.0], k33=[0.5, 0.005, 0.1]) # output control oc = flopy.mf6.ModflowGwfoc(gwf, pname='oc', budget_filerecord='{}.cbb'.format(model_name), head_filerecord='{}.hds'.format(model_name), headprintrecord=[('COLUMNS', 10, 'WIDTH', 15, 'DIGITS', 6, 'GENERAL')], saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], printrecord=[('HEAD', 'FIRST'), ('HEAD', 'LAST'), ('BUDGET', 'LAST')]) # storage package sy = flopy.mf6.ModflowGwfsto.sy.empty(gwf, layered=True) for layer in range(0,3): sy[layer]['data'] = 0.2 ss = flopy.mf6.ModflowGwfsto.ss.empty(gwf, layered=True, default_value=0.000001) sto = flopy.mf6.ModflowGwfsto(gwf, pname='sto', save_flows=True, iconvert=1, ss=ss, sy=sy, steady_state={0:True}, transient={1:True}) # well package # test empty with aux vars, bound names, and time series period_two = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=3, aux_vars=['var1', 'var2', 'var3'], boundnames=True, timeseries=True) period_two[0][0] = ((0,11,2), -50.0, -1, -2, -3, None) period_two[0][1] = ((2,4,7), 'well_1_rate', 1, 2, 3, 'well_1') period_two[0][2] = ((2,3,2), 'well_2_rate', 4, 5, 6, 'well_2') period_three = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=2, aux_vars=['var1', 'var2', 'var3'], boundnames=True, timeseries=True) period_three[0][0] = ((2,3,2), 'well_2_rate', 1, 2, 
3, 'well_2') period_three[0][1] = ((2,4,7), 'well_1_rate', 4, 5, 6, 'well_1') period_four = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=5, aux_vars=['var1', 'var2', 'var3'], boundnames=True, timeseries=True) period_four[0][0] = ((2,4,7), 'well_1_rate', 1, 2, 3, 'well_1') period_four[0][1] = ((2,3,2), 'well_2_rate', 4, 5, 6, 'well_2') period_four[0][2] = ((0,11,2), -10.0, 7, 8, 9, None) period_four[0][3] = ((0,2,4), -20.0, 17, 18, 19, None) period_four[0][4] = ((0,13,5), -40.0, 27, 28, 29, None) stress_period_data = {} stress_period_data[1] = period_two[0] stress_period_data[2] = period_three[0] stress_period_data[3] = period_four[0] wel = flopy.mf6.ModflowGwfwel(gwf, pname='wel', print_input=True, print_flows=True, auxiliary=[('var1', 'var2', 'var3')], maxbound=5, stress_period_data=stress_period_data, boundnames=True, save_flows=True, ts_filerecord='well-rates.ts') # well ts package ts_recarray =[(0.0, 0.0, 0.0, 0.0), (1.0, -200.0, 0.0, -100.0), (11.0, -1800.0, -500.0, -200.0), (21.0, -200.0, -400.0, -300.0), (31.0, 0.0, -600.0, -400.0)] well_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='well_ts', fname='well-rates.ts', parent_file=wel, timeseries=ts_recarray, time_series_namerecord=[('well_1_rate', 'well_2_rate', 'well_3_rate')], interpolation_methodrecord=[('stepwise', 'stepwise', 'stepwise')]) # Evapotranspiration evt_period = flopy.mf6.ModflowGwfevt.stress_period_data.empty(gwf, 150, nseg=3) for col in range(0, 10): for row in range(0, 15): evt_period[0][col*15+row] = (((0, row, col), 50.0, 0.0004, 10.0, 0.2, 0.5, 0.3, 0.1, None)) evt = flopy.mf6.ModflowGwfevt(gwf, pname='evt', print_input=True, print_flows=True, save_flows=True, maxbound=150, nseg=3, stress_period_data=evt_period) # General-Head Boundaries ghb_period = {} ghb_period_array = [] for layer, cond in zip(range(1, 3), [15.0, 1500.0]): for row in range(0, 15): ghb_period_array.append(((layer, row, 9), 'tides', cond, 'Estuary-L2')) ghb_period[0] = ghb_period_array ghb = flopy.mf6.ModflowGwfghb(gwf, pname='ghb', print_input=True, print_flows=True, save_flows=True, boundnames=True, ts_filerecord='tides.ts', obs_filerecord='{}.ghb.obs'.format(model_name), maxbound=30, stress_period_data=ghb_period) ts_recarray=[] fd = open(os.path.join(data_pth, 'tides.txt'), 'r') for line in fd: line_list = line.strip().split(',') ts_recarray.append((float(line_list[0]), float(line_list[1]))) ghb_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='tides_ts', fname='tides.ts', parent_file=ghb, timeseries=ts_recarray, time_series_namerecord='tides', interpolation_methodrecord='linear') obs_recarray = {'ghb_obs.csv':[('ghb-2-6-10', 'GHB', (1, 5, 9)), ('ghb-3-6-10', 'GHB', (2, 5, 9))], 'ghb_flows.csv':[('Estuary2', 'GHB', 'Estuary-L2'), ('Estuary3', 'GHB', 'Estuary-L3')]} ghb_obs_package = flopy.mf6.ModflowUtlobs(gwf, pname='ghb_obs', fname='{}.ghb.obs'.format(model_name), parent_file=ghb, digits=10, print_input=True, continuous=obs_recarray) obs_recarray = {'head_obs.csv':[('h1_13_8', 'HEAD', (2, 12, 7))], 'intercell_flow_obs1.csv':[('ICF1_1.0', 'FLOW-JA-FACE', (0, 4, 5), (0, 5, 5))], 'head-hydrographs.csv':[('h3-13-9', 'HEAD', (2, 12, 8)), ('h3-12-8', 'HEAD', (2, 11, 7)), ('h1-4-3', 'HEAD', (0, 3, 2)), ('h1-12-3', 'HEAD', (0, 11, 2)), ('h1-13-9', 'HEAD', (0, 12, 8))]} obs_package = flopy.mf6.ModflowUtlobs(gwf, pname='head_obs', fname='{}.obs'.format(model_name), digits=10, print_input=True, continuous=obs_recarray) # River riv_period = {} riv_period_array = [((0,2,0),'river_stage_1',1001.0,35.9,None), 
((0,3,1),'river_stage_1',1002.0,35.8,None), ((0,4,2),'river_stage_1',1003.0,35.7,None), ((0,4,3),'river_stage_1',1004.0,35.6,None), ((0,5,4),'river_stage_1',1005.0,35.5,None), ((0,5,5),'river_stage_1',1006.0,35.4,'riv1_c6'), ((0,5,6),'river_stage_1',1007.0,35.3,'riv1_c7'), ((0,4,7),'river_stage_1',1008.0,35.2,None), ((0,4,8),'river_stage_1',1009.0,35.1,None), ((0,4,9),'river_stage_1',1010.0,35.0,None), ((0,9,0),'river_stage_2',1001.0,36.9,'riv2_upper'), ((0,8,1),'river_stage_2',1002.0,36.8,'riv2_upper'), ((0,7,2),'river_stage_2',1003.0,36.7,'riv2_upper'), ((0,6,3),'river_stage_2',1004.0,36.6,None), ((0,6,4),'river_stage_2',1005.0,36.5,None), ((0,5,5),'river_stage_2',1006.0,36.4,'riv2_c6'), ((0,5,6),'river_stage_2',1007.0,36.3,'riv2_c7'), ((0,6,7),'river_stage_2',1008.0,36.2,None), ((0,6,8),'river_stage_2',1009.0,36.1), ((0,6,9),'river_stage_2',1010.0,36.0)] riv_period[0] = riv_period_array riv = flopy.mf6.ModflowGwfriv(gwf, pname='riv', print_input=True, print_flows=True, save_flows='{}.cbc'.format(model_name), boundnames=True, ts_filerecord='river_stages.ts', maxbound=20, stress_period_data=riv_period, obs_filerecord='{}.riv.obs'.format(model_name)) ts_recarray=[(0.0,40.0,41.0),(1.0,41.0,41.5), (2.0,43.0,42.0),(3.0,45.0,42.8), (4.0,44.0,43.0),(6.0,43.0,43.1), (9.0,42.0,42.4),(11.0,41.0,41.5), (31.0,40.0,41.0)] riv_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='riv_ts', fname='river_stages.ts', parent_file=riv, timeseries=ts_recarray, time_series_namerecord=[('river_stage_1', 'river_stage_2')], interpolation_methodrecord=[('linear', 'stepwise')]) obs_recarray = {'riv_obs.csv':[('rv1-3-1', 'RIV', (0,2,0)), ('rv1-4-2', 'RIV', (0,3,1)), ('rv1-5-3', 'RIV', (0,4,2)), ('rv1-5-4', 'RIV', (0,4,3)), ('rv1-6-5', 'RIV', (0,5,4)), ('rv1-c6', 'RIV', 'riv1_c6'), ('rv1-c7', 'RIV', 'riv1_c7'), ('rv2-upper', 'RIV', 'riv2_upper'), ('rv-2-7-4', 'RIV', (0,6,3)), ('rv2-8-5', 'RIV', (0,6,4)), ('rv-2-9-6', 'RIV', (0,5,5,))], 'riv_flowsA.csv':[('riv1-3-1', 'RIV', (0,2,0)), ('riv1-4-2', 'RIV', (0,3,1)), ('riv1-5-3', 'RIV', (0,4,2))], 'riv_flowsB.csv':[('riv2-10-1', 'RIV', (0,9,0)), ('riv-2-9-2', 'RIV', (0,8,1)), ('riv2-8-3', 'RIV', (0,7,2))]} riv_obs_package = flopy.mf6.ModflowUtlobs(gwf, pname='riv_obs', fname='{}.riv.obs'.format(model_name), parent_file=riv, digits=10, print_input=True, continuous=obs_recarray) # First recharge package rch1_period = {} rch1_period_array = [] col_range = {0:3,1:4,2:5} for row in range(0, 15): if row in col_range: col_max = col_range[row] else: col_max = 6 for col in range(0, col_max): if (row == 3 and col == 5) or (row == 2 and col == 4) or (row == 1 and col == 3) or (row == 0 and col == 2): mult = 0.5 else: mult = 1.0 if row == 0 and col == 0: bnd = 'rch-1-1' elif row == 0 and col == 1: bnd = 'rch-1-2' elif row == 1 and col == 2: bnd = 'rch-2-3' else: bnd = None rch1_period_array.append(((0, row, col), 'rch_1', mult, bnd)) rch1_period[0] = rch1_period_array rch1 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_1.rch'.format(model_name), pname='rch_1', fixed_cell=True, auxiliary='MULTIPLIER', auxmultname='MULTIPLIER', print_input=True, print_flows=True, save_flows=True, boundnames=True, ts_filerecord='recharge_rates_1.ts', maxbound=84, stress_period_data=rch1_period) ts_recarray=[(0.0, 0.0015), (1.0, 0.0010), (11.0, 0.0015),(21.0, 0.0025), (31.0, 0.0015)] rch1_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_1_ts', fname='recharge_rates_1.ts', parent_file=rch1, timeseries=ts_recarray, time_series_namerecord='rch_1', interpolation_methodrecord='stepwise') # Second recharge package 
rch2_period = {} rch2_period_array = [((0,0,2), 'rch_2', 0.5), ((0,0,3), 'rch_2', 1.0), ((0,0,4), 'rch_2', 1.0), ((0,0,5), 'rch_2', 1.0), ((0,0,6), 'rch_2', 1.0), ((0,0,7), 'rch_2', 1.0), ((0,0,8), 'rch_2', 1.0), ((0,0,9), 'rch_2', 0.5), ((0,1,3), 'rch_2', 0.5), ((0,1,4), 'rch_2', 1.0), ((0,1,5), 'rch_2', 1.0), ((0,1,6), 'rch_2', 1.0), ((0,1,7), 'rch_2', 1.0), ((0,1,8), 'rch_2', 0.5), ((0,2,4), 'rch_2', 0.5), ((0,2,5), 'rch_2', 1.0), ((0,2,6), 'rch_2', 1.0), ((0,2,7), 'rch_2', 0.5), ((0,3,5), 'rch_2', 0.5), ((0,3,6), 'rch_2', 0.5)] rch2_period[0] = rch2_period_array rch2 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_2.rch'.format(model_name), pname='rch_2', fixed_cell=True, auxiliary='MULTIPLIER', auxmultname='MULTIPLIER', print_input=True, print_flows=True, save_flows=True, ts_filerecord='recharge_rates_2.ts', maxbound=20, stress_period_data=rch2_period) ts_recarray=[(0.0, 0.0016), (1.0, 0.0018), (11.0, 0.0019),(21.0, 0.0016), (31.0, 0.0018)] rch2_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_2_ts', fname='recharge_rates_2.ts', parent_file=rch2, timeseries=ts_recarray, time_series_namerecord='rch_2', interpolation_methodrecord='linear') # Third recharge package rch3_period = {} rch3_period_array = [] col_range = {0:9,1:8,2:7} for row in range(0, 15): if row in col_range: col_min = col_range[row] else: col_min = 6 for col in range(col_min, 10): if (row == 0 and col == 9) or (row == 1 and col == 8) or (row == 2 and col == 7) or (row == 3 and col == 6): mult = 0.5 else: mult = 1.0 rch3_period_array.append(((0, row, col), 'rch_3', mult)) rch3_period[0] = rch3_period_array rch3 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_3.rch'.format(model_name), pname='rch_3', fixed_cell=True, auxiliary='MULTIPLIER', auxmultname='MULTIPLIER', print_input=True, print_flows=True, save_flows=True, ts_filerecord='recharge_rates_3.ts', maxbound=54, stress_period_data=rch3_period) ts_recarray=[(0.0, 0.0017),(1.0, 0.0020),(11.0, 0.0017),(21.0, 0.0018),(31.0, 0.0020)] rch3_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_3_ts', fname='recharge_rates_3.ts', parent_file=rch3, timeseries=ts_recarray, time_series_namerecord='rch_3', interpolation_methodrecord='linear')
_____no_output_____
CC0-1.0
examples/Notebooks/flopy3_mf6_B_complex-model.ipynb
gyanz/flopy
Create the MODFLOW 6 Input Files and Run the Model Once all the flopy objects are created, it is very easy to create all of the input files and run the model.
# change folder to save simulation #sim.simulation_data.mfpath.set_sim_path(run_folder) # write simulation to new location sim.write_simulation() # Print a list of the files that were created # in workspace print(os.listdir(workspace))
['intercell_flow_obs1.csv', 'riv_flowsA.csv', 'advgw_tidal.nam', 'advgw_tidal.ic', 'riv_flowsB.csv', 'advgw_tidal.dis.grb', 'recharge_rates_1.ts', 'advgw_tidal.sto', 'advgw_tidal.lst', 'tides.ts', 'advgw_tidal.cbb', 'advgw_tidal.ims', 'well-rates.ts', 'advgw_tidal.ghb', 'advgw_tidal.obs', 'advgw_tidal.riv.obs', 'ghb_flows.csv', 'advgw_tidal.dis', 'advgw_tidal_1.rch', 'advgw_tidal_3.rch', 'advgw_tidal_2.rch', 'advgw_tidal.oc', 'river_stages.ts', 'advgw_tidal.hds', 'advgw_tidal.wel', 'advgw_tidal.ghb.obs', 'advgw_tidal.npf', 'head_obs.csv', 'advgw_tidal.tdis', 'ghb_obs.csv', 'recharge_rates_2.ts', 'mfsim.nam', 'advgw_tidal.riv', 'head-hydrographs.csv', 'mfsim.lst', 'recharge_rates_3.ts', 'advgw_tidal.evt', 'riv_obs.csv']
CC0-1.0
examples/Notebooks/flopy3_mf6_B_complex-model.ipynb
gyanz/flopy
Run the Simulation We can also run the simulation from the notebook, but only if the MODFLOW 6 executable is available. The executable can be made available by putting the executable in a folder that is listed in the system path variable. Another option is to just put a copy of the executable in the simulation folder, though this should generally be avoided. A final option is to provide a full path to the executable when the simulation is constructed. This would be done by specifying exe_name with the full path.
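As a sketch of that last option (the binary location below is an assumption; substitute the actual path to mf6 on your system), the full path is passed via exe_name when the simulation object is built:

# Hypothetical example only: point flopy at a specific MODFLOW 6 executable.
sim_fullpath = flopy.mf6.MFSimulation(sim_name=model_name, version='mf6',
                                      exe_name='/full/path/to/mf6',
                                      sim_ws=workspace)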
# Run the simulation success, buff = sim.run_simulation() print('\nSuccess is: ', success)
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/mf6 MODFLOW 6 U.S. GEOLOGICAL SURVEY MODULAR HYDROLOGIC MODEL VERSION 6.0.3 08/09/2018 MODFLOW 6 compiled Sep 24 2018 16:09:01 with GFORTRAN compiler (ver. 6.4.0) This software has been approved for release by the U.S. Geological Survey (USGS). Although the software has been subjected to rigorous review, the USGS reserves the right to update the software as needed pursuant to further analysis and review. No warranty, expressed or implied, is made by the USGS or the U.S. Government as to the functionality of the software and related material nor shall the fact of release constitute any such warranty. Furthermore, the software is released on condition that neither the USGS nor the U.S. Government shall be held liable for any damages resulting from its authorized or unauthorized use. Also refer to the USGS Water Resources Software User Rights Notice for complete use, copyright, and distribution information. Run start date and time (yyyy/mm/dd hh:mm:ss): 2018/10/19 16:29:49 Writing simulation list file: mfsim.lst Using Simulation name file: mfsim.nam Solving: Stress period: 1 Time step: 1 Solving: Stress period: 2 Time step: 1 Solving: Stress period: 2 Time step: 2 Solving: Stress period: 2 Time step: 3 Solving: Stress period: 2 Time step: 4 Solving: Stress period: 2 Time step: 5 Solving: Stress period: 2 Time step: 6 Solving: Stress period: 2 Time step: 7 Solving: Stress period: 2 Time step: 8 Solving: Stress period: 2 Time step: 9 Solving: Stress period: 2 Time step: 10 Solving: Stress period: 2 Time step: 11 Solving: Stress period: 2 Time step: 12 Solving: Stress period: 2 Time step: 13 Solving: Stress period: 2 Time step: 14 Solving: Stress period: 2 Time step: 15 Solving: Stress period: 2 Time step: 16 Solving: Stress period: 2 Time step: 17 Solving: Stress period: 2 Time step: 18 Solving: Stress period: 2 Time step: 19 Solving: Stress period: 2 Time step: 20 Solving: Stress period: 2 Time step: 21 Solving: Stress period: 2 Time step: 22 Solving: Stress period: 2 Time step: 23 Solving: Stress period: 2 Time step: 24 Solving: Stress period: 2 Time step: 25 Solving: Stress period: 2 Time step: 26 Solving: Stress period: 2 Time step: 27 Solving: Stress period: 2 Time step: 28 Solving: Stress period: 2 Time step: 29 Solving: Stress period: 2 Time step: 30 Solving: Stress period: 2 Time step: 31 Solving: Stress period: 2 Time step: 32 Solving: Stress period: 2 Time step: 33 Solving: Stress period: 2 Time step: 34 Solving: Stress period: 2 Time step: 35 Solving: Stress period: 2 Time step: 36 Solving: Stress period: 2 Time step: 37 Solving: Stress period: 2 Time step: 38 Solving: Stress period: 2 Time step: 39 Solving: Stress period: 2 Time step: 40 Solving: Stress period: 2 Time step: 41 Solving: Stress period: 2 Time step: 42 Solving: Stress period: 2 Time step: 43 Solving: Stress period: 2 Time step: 44 Solving: Stress period: 2 Time step: 45 Solving: Stress period: 2 Time step: 46 Solving: Stress period: 2 Time step: 47 Solving: Stress period: 2 Time step: 48 Solving: Stress period: 2 Time step: 49 Solving: Stress period: 2 Time step: 50 Solving: Stress period: 2 Time step: 51 Solving: Stress period: 2 Time step: 52 Solving: Stress period: 2 Time step: 53 Solving: Stress period: 2 Time step: 54 Solving: Stress period: 2 Time step: 55 Solving: Stress period: 2 Time step: 56 Solving: Stress period: 2 Time step: 57 Solving: Stress period: 2 Time step: 58 Solving: Stress period: 2 Time step: 59 Solving: Stress 
period: 2 Time step: 60 Solving: Stress period: 2 Time step: 61 Solving: Stress period: 2 Time step: 62 Solving: Stress period: 2 Time step: 63 Solving: Stress period: 2 Time step: 64 Solving: Stress period: 2 Time step: 65 Solving: Stress period: 2 Time step: 66 Solving: Stress period: 2 Time step: 67 Solving: Stress period: 2 Time step: 68 Solving: Stress period: 2 Time step: 69 Solving: Stress period: 2 Time step: 70 Solving: Stress period: 2 Time step: 71 Solving: Stress period: 2 Time step: 72 Solving: Stress period: 2 Time step: 73 Solving: Stress period: 2 Time step: 74 Solving: Stress period: 2 Time step: 75 Solving: Stress period: 2 Time step: 76 Solving: Stress period: 2 Time step: 77 Solving: Stress period: 2 Time step: 78 Solving: Stress period: 2 Time step: 79 Solving: Stress period: 2 Time step: 80 Solving: Stress period: 2 Time step: 81 Solving: Stress period: 2 Time step: 82 Solving: Stress period: 2 Time step: 83 Solving: Stress period: 2 Time step: 84 Solving: Stress period: 2 Time step: 85 Solving: Stress period: 2 Time step: 86 Solving: Stress period: 2 Time step: 87 Solving: Stress period: 2 Time step: 88 Solving: Stress period: 2 Time step: 89 Solving: Stress period: 2 Time step: 90 Solving: Stress period: 2 Time step: 91 Solving: Stress period: 2 Time step: 92 Solving: Stress period: 2 Time step: 93 Solving: Stress period: 2 Time step: 94 Solving: Stress period: 2 Time step: 95 Solving: Stress period: 2 Time step: 96 Solving: Stress period: 2 Time step: 97 Solving: Stress period: 2 Time step: 98 Solving: Stress period: 2 Time step: 99 Solving: Stress period: 2 Time step: 100 Solving: Stress period: 2 Time step: 101 Solving: Stress period: 2 Time step: 102 Solving: Stress period: 2 Time step: 103 Solving: Stress period: 2 Time step: 104 Solving: Stress period: 2 Time step: 105 Solving: Stress period: 2 Time step: 106 Solving: Stress period: 2 Time step: 107 Solving: Stress period: 2 Time step: 108 Solving: Stress period: 2 Time step: 109 Solving: Stress period: 2 Time step: 110 Solving: Stress period: 2 Time step: 111 Solving: Stress period: 2 Time step: 112 Solving: Stress period: 2 Time step: 113 Solving: Stress period: 2 Time step: 114 Solving: Stress period: 2 Time step: 115 Solving: Stress period: 2 Time step: 116 Solving: Stress period: 2 Time step: 117 Solving: Stress period: 2 Time step: 118 Solving: Stress period: 2 Time step: 119 Solving: Stress period: 2 Time step: 120 Solving: Stress period: 3 Time step: 1 Solving: Stress period: 3 Time step: 2 Solving: Stress period: 3 Time step: 3 Solving: Stress period: 3 Time step: 4 Solving: Stress period: 3 Time step: 5 Solving: Stress period: 3 Time step: 6 Solving: Stress period: 3 Time step: 7 Solving: Stress period: 3 Time step: 8 Solving: Stress period: 3 Time step: 9 Solving: Stress period: 3 Time step: 10 Solving: Stress period: 3 Time step: 11 Solving: Stress period: 3 Time step: 12 Solving: Stress period: 3 Time step: 13 Solving: Stress period: 3 Time step: 14 Solving: Stress period: 3 Time step: 15 Solving: Stress period: 3 Time step: 16 Solving: Stress period: 3 Time step: 17 Solving: Stress period: 3 Time step: 18 Solving: Stress period: 3 Time step: 19 Solving: Stress period: 3 Time step: 20 Solving: Stress period: 3 Time step: 21 Solving: Stress period: 3 Time step: 22 Solving: Stress period: 3 Time step: 23 Solving: Stress period: 3 Time step: 24 Solving: Stress period: 3 Time step: 25 Solving: Stress period: 3 Time step: 26 Solving: Stress period: 3 Time step: 27 Solving: Stress period: 3 Time step: 
28 Solving: Stress period: 3 Time step: 29 Solving: Stress period: 3 Time step: 30 Solving: Stress period: 3 Time step: 31 Solving: Stress period: 3 Time step: 32 Solving: Stress period: 3 Time step: 33 Solving: Stress period: 3 Time step: 34 Solving: Stress period: 3 Time step: 35 Solving: Stress period: 3 Time step: 36 Solving: Stress period: 3 Time step: 37 Solving: Stress period: 3 Time step: 38 Solving: Stress period: 3 Time step: 39 Solving: Stress period: 3 Time step: 40 Solving: Stress period: 3 Time step: 41 Solving: Stress period: 3 Time step: 42 Solving: Stress period: 3 Time step: 43 Solving: Stress period: 3 Time step: 44 Solving: Stress period: 3 Time step: 45 Solving: Stress period: 3 Time step: 46 Solving: Stress period: 3 Time step: 47 Solving: Stress period: 3 Time step: 48 Solving: Stress period: 3 Time step: 49 Solving: Stress period: 3 Time step: 50 Solving: Stress period: 3 Time step: 51 Solving: Stress period: 3 Time step: 52 Solving: Stress period: 3 Time step: 53 Solving: Stress period: 3 Time step: 54 Solving: Stress period: 3 Time step: 55 Solving: Stress period: 3 Time step: 56 Solving: Stress period: 3 Time step: 57 Solving: Stress period: 3 Time step: 58 Solving: Stress period: 3 Time step: 59 Solving: Stress period: 3 Time step: 60 Solving: Stress period: 3 Time step: 61 Solving: Stress period: 3 Time step: 62 Solving: Stress period: 3 Time step: 63 Solving: Stress period: 3 Time step: 64 Solving: Stress period: 3 Time step: 65 Solving: Stress period: 3 Time step: 66 Solving: Stress period: 3 Time step: 67 Solving: Stress period: 3 Time step: 68 Solving: Stress period: 3 Time step: 69 Solving: Stress period: 3 Time step: 70 Solving: Stress period: 3 Time step: 71 Solving: Stress period: 3 Time step: 72 Solving: Stress period: 3 Time step: 73 Solving: Stress period: 3 Time step: 74 Solving: Stress period: 3 Time step: 75 Solving: Stress period: 3 Time step: 76 Solving: Stress period: 3 Time step: 77 Solving: Stress period: 3 Time step: 78 Solving: Stress period: 3 Time step: 79 Solving: Stress period: 3 Time step: 80 Solving: Stress period: 3 Time step: 81 Solving: Stress period: 3 Time step: 82 Solving: Stress period: 3 Time step: 83 Solving: Stress period: 3 Time step: 84 Solving: Stress period: 3 Time step: 85 Solving: Stress period: 3 Time step: 86 Solving: Stress period: 3 Time step: 87 Solving: Stress period: 3 Time step: 88 Solving: Stress period: 3 Time step: 89 Solving: Stress period: 3 Time step: 90 Solving: Stress period: 3 Time step: 91 Solving: Stress period: 3 Time step: 92 Solving: Stress period: 3 Time step: 93 Solving: Stress period: 3 Time step: 94 Solving: Stress period: 3 Time step: 95 Solving: Stress period: 3 Time step: 96 Solving: Stress period: 3 Time step: 97 Solving: Stress period: 3 Time step: 98 Solving: Stress period: 3 Time step: 99 Solving: Stress period: 3 Time step: 100 Solving: Stress period: 3 Time step: 101
CC0-1.0
examples/Notebooks/flopy3_mf6_B_complex-model.ipynb
gyanz/flopy
Post-Process Head Results Post-processing MODFLOW 6 results is still a work in progress. There aren't any Flopy plotting functions built in yet, like there are for other MODFLOW versions, so we need to plot the results using general Flopy capabilities. We can also use some of the Flopy ModelMap capabilities for MODFLOW 6, but in order to do so, we need to manually create a SpatialReference object, which is needed for the plotting. Examples of both approaches are shown below. First, a link to the heads file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by specifying, in this case, the step number and period number for which we want to retrieve data. A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions are used to make contours of the layers or a cross-section.
# Read the binary head file and plot the results # We can use the existing Flopy HeadFile class because # the format of the headfile for MODFLOW 6 is the same # as for previous MODFLOW versions headfile = '{}.hds'.format(model_name) fname = os.path.join(workspace, headfile) hds = flopy.utils.binaryfile.HeadFile(fname) h = hds.get_data() # We can also use the Flopy model map capabilities for MODFLOW 6 # but in order to do so, we need to manually create a # SpatialReference object fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(1, 1, 1, aspect='equal') sr = flopy.utils.reference.SpatialReference(delr=dis.delr[:], delc=dis.delc[:]) # Next we create an instance of the ModelMap class modelmap = flopy.plot.ModelMap(sr=sr) # Then we can use the plot_grid() method to draw the grid # The return value for this function is a matplotlib LineCollection object, # which could be manipulated (or used) later if necessary. #quadmesh = modelmap.plot_ibound(ibound=ibd) linecollection = modelmap.plot_grid() contours = modelmap.contour_array(h[0])
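A small optional sketch of the step/period selection mentioned above: instead of taking the default record returned by `get_data()`, a specific (time step, stress period) pair can be listed and requested explicitly (this assumes the `hds` object from the cell above):

# List the (kstp, kper) pairs stored in the head file and fetch one explicitly.
print(hds.get_kstpkper()[-1])                          # last available pair
h_last = hds.get_data(kstpkper=hds.get_kstpkper()[-1])
print(h_last.shape)                                    # (nlay, nrow, ncol)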
_____no_output_____
CC0-1.0
examples/Notebooks/flopy3_mf6_B_complex-model.ipynb
gyanz/flopy
Post-Process Flows MODFLOW 6 writes a binary grid file, which contains information about the model grid. MODFLOW 6 also writes a binary budget file, which contains flow information. Both of these files can be read using Flopy capabilities. The MfGrdFile class in Flopy can be used to read the binary grid file. The CellBudgetFile class in Flopy can be used to read the binary budget file written by MODFLOW 6.
# read the binary grid file fname = os.path.join(workspace, '{}.dis.grb'.format(model_name)) bgf = flopy.utils.mfgrdfile.MfGrdFile(fname) # data read from the binary grid file is stored in a dictionary bgf._datadict # Information from the binary grid file is easily retrieved ia = bgf._datadict['IA'] - 1 ja = bgf._datadict['JA'] - 1 # read the cell budget file fname = os.path.join(workspace, '{}.cbb'.format(model_name)) cbb = flopy.utils.CellBudgetFile(fname, precision='double') #cbb.list_records() flowja = cbb.get_data(text='FLOW-JA-FACE')[0][0, 0, :] # By having the ia and ja arrays and the flow-ja-face we can look at # the flows for any cell and process them in the follow manner. k = 2; i = 7; j = 7 celln = k * nrow * ncol + i * nrow + j print('Printing flows for cell {}'.format(celln + 1)) for ipos in range(ia[celln] + 1, ia[celln + 1]): cellm = ja[ipos] # change from one-based to zero-based print('Cell {} flow with cell {} is {}'.format(celln + 1, cellm + 1, flowja[ipos])) fname = 'head-hydrographs.csv' fname = os.path.join(workspace, fname) csv = np.genfromtxt(fname, delimiter=',', dtype=None, names=True) for name in csv.dtype.names[1:]: plt.plot(csv['time'], csv[name], label=name) plt.legend()
_____no_output_____
CC0-1.0
examples/Notebooks/flopy3_mf6_B_complex-model.ipynb
gyanz/flopy
Clustering text documents using k-means As an example we'll be using the 20 newsgroups dataset, which consists of 18000+ newsgroup posts on 20 topics. You can learn more about the dataset at http://qwone.com/~jason/20Newsgroups/
from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import Normalizer from sklearn import metrics import matplotlib.pyplot as plt from sklearn.cluster import KMeans, MiniBatchKMeans import numpy as np from sklearn.pipeline import make_pipeline # needed for the LSA pipeline used later
_____no_output_____
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
Load data
newsgroups_train = fetch_20newsgroups(subset='train') print(list(newsgroups_train.target_names))
['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
To keep it simple, let's filter down to only 3 topics. Assuming we do not know the topics, let's run a clustering algorithm and examine the keywords of each cluster.
categories = ['alt.atheism', 'comp.graphics', 'rec.motorcycles'] dataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=2017) print("%d documents" % len(dataset.data)) print("%d categories" % len(dataset.target_names)) labels = dataset.target print("Extracting features from the dataset using a sparse vectorizer") vectorizer = TfidfVectorizer(stop_words='english') X = vectorizer.fit_transform(dataset.data) print("n_samples: %d, n_features: %d" % X.shape)
2768 documents 3 categories Extracting features from the dataset using a sparse vectorizer n_samples: 2768, n_features: 35311
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
LSA via SVD Latent Semantic Analysis (LSA) is a mathematical method that tries to bring out latent relationships within a collection of documents. Rather than looking at each document in isolation from the others, it looks at all the documents as a whole and the terms within them to identify relationships. Let's perform LSA by running SVD on the data to reduce the dimensionality.

SVD of a matrix: A = U * ∑ * V^T, where
* r = rank of matrix A
* U = column-orthonormal m * r matrix
* ∑ = diagonal r * r matrix with singular values sorted in descending order
* V = column-orthonormal n * r matrix (so V^T is r * n)

In our case we have 3 topics, 2768 documents and a 35311-word vocabulary:
* Original matrix = 2768 * 35311 ≈ 10^8 values
* Rank-3 SVD = 3*2768 + 3 + 3*35311 ≈ 10^5.1 values

The resulting SVD takes roughly 850 times less space than the original matrix.
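A quick back-of-the-envelope check of the storage estimate above, using only the counts already stated (3 topics, 2768 documents, 35311 terms):

import numpy as np

# Storage of the full term-document matrix vs. a rank-3 SVD factorization.
n_docs, n_terms, r = 2768, 35311, 3
original = n_docs * n_terms                  # ~ 9.8e7 values, i.e. ~ 10^8.0
svd_size = r * n_docs + r + r * n_terms      # ~ 1.1e5 values, i.e. ~ 10^5.1
print("original ~ 10^{:.1f}".format(np.log10(original)))
print("rank-3 SVD ~ 10^{:.1f}".format(np.log10(svd_size)))
print("compression ~ {:.0f}x".format(original / svd_size))  # ~ 856x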
from IPython.display import Image Image(filename='../Chapter 5 Figures/SVD.png', width=500) from sklearn.decomposition import TruncatedSVD # Lets reduce the dimensionality to 2000 svd = TruncatedSVD(2000) lsa = make_pipeline(svd, Normalizer(copy=False)) X = lsa.fit_transform(X) explained_variance = svd.explained_variance_ratio_.sum() print("Explained variance of the SVD step: {}%".format(int(explained_variance * 100)))
Explained variance of the SVD step: 95%
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
k-means clustering
from __future__ import print_function km = KMeans(n_clusters=3, init='k-means++', max_iter=100, n_init=1) # Scikit learn provides MiniBatchKMeans to run k-means in batch mode suitable for a very large corpus # km = MiniBatchKMeans(n_clusters=5, init='k-means++', n_init=1, init_size=1000, batch_size=1000) print("Clustering sparse data with %s" % km) km.fit(X) print("Top terms per cluster:") original_space_centroids = svd.inverse_transform(km.cluster_centers_) order_centroids = original_space_centroids.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(3): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print()
Clustering sparse data with KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=100, n_clusters=3, n_init=1, n_jobs=1, precompute_distances='auto', random_state=None, tol=0.0001, verbose=0) Top terms per cluster: Cluster 0: edu graphics university god subject lines organization com posting uk Cluster 1: com bike edu dod ca writes article sun like organization Cluster 2: keith sgi livesey caltech com solntze wpd jon edu sandvik
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
Hierarchical clustering
from sklearn.metrics.pairwise import cosine_similarity dist = 1 - cosine_similarity(X) from scipy.cluster.hierarchy import ward, dendrogram linkage_matrix = ward(dist) #define the linkage_matrix using ward clustering pre-computed distances fig, ax = plt.subplots(figsize=(8, 8)) # set size ax = dendrogram(linkage_matrix, orientation="right") plt.tick_params(axis= 'x', which='both', bottom='off', top='off', labelbottom='off') plt.tight_layout() #show plot with tight layout plt.show()
_____no_output_____
MIT
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb
manual123/Nacho-Jupyter-Notebooks
10 May 2017 - Lecture 2 JNB Code Along - WH Nixalo

[Notebook](https://github.com/fastai/courses/blob/ed1fb08d86df277d2736972a1ff1ac39ea1ac733/deeplearning1/nbs/lesson2.ipynb) | Lecture [1:20:00](https://www.youtube.com/watch?v=e3aM6XTekJc)

1 Linear models with CNN features
# This is to point Python to my utils folder
import sys; import os
# DIR = %pwd
sys.path.insert(1, os.path.join('../utils'))

# Rather than importing everything manually, we'll make things easy
# and load them all in utils.py, and just import them from there.
import utils; reload(utils)
from utils import *

%matplotlib inline
Using Theano backend.
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos
1.1 Intro

We need to find a way to convert the ImageNet predictions to a probability of being a cat or a dog, since that is what the Kaggle competition requires us to submit. We could use the ImageNet hierarchy to download a list of all the ImageNet categories in each of the dog and cat groups, and could then solve our problem in various ways, such as:

* Finding the largest probability that's either a cat or a dog, and using that label
* Averaging the probability of all the cat categories and comparing it to the average of all the dog categories

But these approaches have some downsides:

* They require manual coding for something that we should be able to learn from the data
* They ignore information available in the predictions; for instance, if the model predicts that there is a bone in the image, it's more likely to be a dog than a cat

A very simple solution to both of these problems is to learn a linear model that is trained using the 1,000 predictions from the ImageNet model for each image as input, and the dog/cat label as target.
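As an aside (a sketch under assumptions, not code from the lecture): the second approach above, averaging the cat-category probabilities against the dog-category probabilities, could look roughly like this. Here `preds`, `cat_idxs`, and `dog_idxs` are hypothetical names that are not defined in this notebook.

```python
import numpy as np

# Hypothetical inputs -- none of these are defined in this notebook:
#   preds:    (n_images, 1000) array of ImageNet class probabilities
#   cat_idxs: list of ImageNet class indices that are cat breeds
#   dog_idxs: list of ImageNet class indices that are dog breeds
def average_cat_vs_dog(preds, cat_idxs, dog_idxs):
    cat_score = preds[:, cat_idxs].mean(axis=1)   # mean probability over the cat classes
    dog_score = preds[:, dog_idxs].mean(axis=1)   # mean probability over the dog classes
    return (dog_score > cat_score).astype(int)    # 1 = dog, 0 = cat
```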
%matplotlib inline
from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
import scipy
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt

import utils; reload(utils)
from utils import plots, get_batches, plot_confusion_matrix, get_data

from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom

import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers import Input
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.preprocessing import image
_____no_output_____
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos
1.2 Linear models in Keras

Let's forget the motivating example for a second and see how we can create a simple linear model in Keras.

Each of the ```Dense()``` layers is just a *linear* model, followed by a simple *activation function*.

In a linear model each row is calculated as ```sum(row * weights)```, where the weights need to be learnt from the data and will be the same for every row. Let's create some data that we know is linearly related:
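As a quick illustration of that formula (an added sketch, not part of the original notebook), here is a minimal NumPy version for a single row; the weight vector `[2., 3.]` and the constant `1.` are the same made-up values used in the next cell:

```python
import numpy as np

weights = np.array([2., 3.])  # made-up weights, same values as the next cell
bias = 1.                     # made-up constant term

row = np.array([0.5, 0.2])            # one example row with 2 columns
pred = np.sum(row * weights) + bias   # sum(row * weights) plus the constant
print(pred)                           # 0.5*2 + 0.2*3 + 1 = 2.6
```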
# We'll create a random matrix w/ 2 columns, & do a matrix multiply to get our
# y value using a vector [2, 3] & adding a constant of 1.
x = random((30, 2))
y = np.dot(x, [2., 3.]) + 1.

x[:5]
y[:5]
_____no_output_____
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos
We can use Keras to create a simple linear model (a ```Dense()``` layer with no activation) and optimize it using SGD to minimize mean squared error.
from keras.optimizers import SGD

# Keras calls the Linear Model "Dense"; aka. "Fully-Connected" in other libraries.
# So when we go 'Dense' w/ an input of 2 columns & an output of 1 column,
# we're defining a linear model that can go from the 2-column array above to
# the 1-column output of y above.
# Sequential() is a way of building multiple-layer networks. It takes an
# array containing all the layers in your NN. A LM is a single Dense layer.
# This automatically initializes the weights sensibly & calcs derivatives.
# We just tell it how to optimize the weights: SGD w/ LR=0.1, minimizing MSE.
lm = Sequential([Dense(1, input_shape=(2,))])
lm.compile(optimizer=SGD(lr=0.1), loss='mse')

# find out our loss with random weights
lm.evaluate(x, y, verbose=0)

# now run SGD for 5 epochs & watch the loss improve;
# lm.fit(..) does the solving
lm.fit(x, y, nb_epoch=5, batch_size=1)

# now evaluate and see the improvement:
lm.evaluate(x, y, verbose=0)

# take a look at the weights; they should be virtually equal to 2, 3, and 1:
lm.get_weights()

# so let's run another 5 epochs and see if this improves things:
lm.fit(x, y, nb_epoch=5, batch_size=1)
lm.evaluate(x, y, verbose=0)

# and take a look at the new weights:
lm.get_weights()
_____no_output_____
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos
Above is everything Keras is doing behind the scenes.

So, if we pass multiple layers to Keras via ```Sequential(..)```, we can start to build and optimize deep neural networks.

Before that, we can still use the single-layer LM to create a pretty decent entry to the dogs-vs-cats Kaggle competition.

1.3 Train Linear Model on Predictions

Forgetting finetuning -- how do we take the output of an ImageNet network and, as simply as possible, create a good entry to the cats-vs-dogs competition? Our current ImageNet network returns a thousand probabilities, but we need just cat vs dog, and we don't want to manually write code to roll up the hierarchy into cats/dogs.

So what we can do is learn a linear model that takes the output of the ImageNet model, all 1,000 of its predictions, as input, and uses the dog/cat label as the target -- that LM would solve our problem.

1.3.1 Training the model

We start with some basic config steps. We copy a small amount of our data into a 'sample' directory, with the exact same structure as our 'train' directory -- this is *always* a good idea in *all* machine learning, since we should do all of our initial testing using a dataset small enough that we never have to wait for it.
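Before the config steps below, here is a hedged sketch (not code from the lecture) of what such a linear model on the ImageNet predictions could look like in the same Keras 1.x style used above. The arrays `trn_features` (the 1,000 ImageNet probabilities per image) and `trn_labels` (one-hot cat/dog targets) are assumed to have been produced elsewhere and are not defined in this notebook.

```python
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import RMSprop

# Hypothetical inputs, not defined anywhere in this notebook:
#   trn_features: (n_images, 1000) array of ImageNet predictions for the training set
#   trn_labels:   (n_images, 2) one-hot [cat, dog] targets
lm = Sequential([Dense(2, activation='softmax', input_shape=(1000,))])
lm.compile(optimizer=RMSprop(lr=0.1), loss='categorical_crossentropy', metrics=['accuracy'])
# lm.fit(trn_features, trn_labels, nb_epoch=3, batch_size=batch_size,
#        validation_data=(val_features, val_labels))
```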
# set up the directories
os.mkdir('data')
os.mkdir('data/dogscats')
path = "data/dogscats/"
model_path = path + 'models/'

# if the path to our models doesn't exist, make it
if not os.path.exists(model_path):
    os.mkdir(model_path)
# NOTE: os.mkdir(..) only works for a single folder,
# and will throw an error if the dir already exists
_____no_output_____
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos
We'll process as many images at a time as we can. Finding the maximum batch size that doesn't cause a memory error is a case of trial and error.
batch_size = 100
_____no_output_____
MIT
FAI_old/lesson2/lesson2_codealong.ipynb
WNoxchi/Kawkasos