hexsha (stringlengths 40..40) | size (int64 6..14.9M) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 6..260) | max_stars_repo_name (stringlengths 6..119) | max_stars_repo_head_hexsha (stringlengths 40..41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 6..260) | max_issues_repo_name (stringlengths 6..119) | max_issues_repo_head_hexsha (stringlengths 40..41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 6..260) | max_forks_repo_name (stringlengths 6..119) | max_forks_repo_head_hexsha (stringlengths 40..41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | avg_line_length (float64 2..1.04M) | max_line_length (int64 2..11.2M) | alphanum_fraction (float64 0..1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e74e83a72fbf8bd711d95c60397688b19975256c | 220,494 | ipynb | Jupyter Notebook | notebooks/_chapter-01-page-042-image-segmentation.ipynb | philipwalsh/fastai.100days | f478b9712dbbb9eaa36b5e65ba890231113d656b | [
"MIT"
] | null | null | null | notebooks/_chapter-01-page-042-image-segmentation.ipynb | philipwalsh/fastai.100days | f478b9712dbbb9eaa36b5e65ba890231113d656b | [
"MIT"
] | null | null | null | notebooks/_chapter-01-page-042-image-segmentation.ipynb | philipwalsh/fastai.100days | f478b9712dbbb9eaa36b5e65ba890231113d656b | [
"MIT"
] | null | null | null | 984.348214 | 215,412 | 0.956557 | [
[
[
"from fastai2.vision.all import *",
"_____no_output_____"
],
[
"path=untar_data(URLs.CAMVID_TINY)",
"_____no_output_____"
],
[
"dls=SegmentationDataLoaders.from_label_func( path, bs=8 , fnames=get_image_files(path/\"images\"), \n label_func=lambda o: path/'labels'/f'{o.stem}_P{o.suffix}', \n codes=np.loadtxt(path/'codes.txt', dtype=str))",
"_____no_output_____"
],
[
"learn=unet_learner(dls, resnet34)",
"_____no_output_____"
],
[
"learn.fine_tune(8)",
"_____no_output_____"
],
[
"learn.show_results(max_n=6, figsize=(10,10))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74e8e8001038695567a412976e8ddb12627927f | 5,522 | ipynb | Jupyter Notebook | notebooks/book1/03/gauss_plot_2d.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
] | null | null | null | notebooks/book1/03/gauss_plot_2d.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
] | 1 | 2022-03-27T04:59:50.000Z | 2022-03-27T04:59:50.000Z | notebooks/book1/03/gauss_plot_2d.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
] | 2 | 2022-03-26T11:52:36.000Z | 2022-03-27T05:17:48.000Z | 30.849162 | 111 | 0.531148 | [
[
[
"## Visualization of a 2d Gaussian density as a surface and contour plots ",
"_____no_output_____"
]
],
[
[
"import jax\nimport jax.numpy as jnp\nimport jax.scipy\nfrom jax.config import config\nfrom jax.scipy.stats import multivariate_normal\nfrom matplotlib import colors\nfrom matplotlib.colors import LightSource\nfrom mpl_toolkits.mplot3d import axes3d\nimport numpy as np\nimport seaborn as sns\nimport os\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nconfig.update(\"jax_enable_x64\", True)\n\n\ntry:\n from probml_utils import savefig, latexify\nexcept ModuleNotFoundError:\n %pip install -qq git+https: // github.com/probml/probml-utils.git\n from probml_utils import savefig, latexify",
"_____no_output_____"
],
[
"#### finding multivariate pdf ####\n\ngaussians = [\"Full\", \"Diagonal\", \"Spherical\"]\n\n# Mean and Covaraince\nmean = jnp.array([0, 0])\ncovariance = {\n \"Full\": jnp.array([[2, 1.8], [1.8, 2]]),\n \"Diagonal\": jnp.array([[1, 0], [0, 3]]),\n \"Spherical\": jnp.array([[1, 0], [0, 1]]),\n}\n\n# Multivariate gaussian PDF\n\n\ndef gaussian_pdf(x, y, G):\n return multivariate_normal.pdf(jnp.array([x, y]), mean=mean, cov=covariance[G])\n\n\n# Defining Meshgrid\nstart_point = 5\nstop_point = 5\nnum_samples = 100\npoints = jnp.linspace(-start_point, stop_point, num_samples)\nX, Y = jnp.meshgrid(points, points)",
"_____no_output_____"
],
[
"##### Plots to show probability distribution #####\n\n# contour plot\ndef make_contour_plot(gauss, fig=None, ax=None):\n # vectorizing\n Z = jax.vmap(jax.vmap(gaussian_pdf, (0, 0, None)), (1, 1, None))(X, Y, gauss)\n\n if fig is None:\n fig, ax = plt.subplots()\n\n ax.contour(Y, X, Z)\n ax.set_xlabel(\"$y_1$\")\n ax.set_ylabel(\"$y_2$\")\n plt.axis(\"equal\")\n sns.despine()\n plt.title(gauss, fontweight=\"bold\")\n plt.draw()\n savefig(\"gaussPlot2dDemoContour{}_latexified\".format(gauss))\n plt.show()\n\n\n# Surface plot\n\n\ndef make_surface_plot(gauss, fig=None, ax=None):\n # vectorizing\n Z = jax.vmap(jax.vmap(gaussian_pdf, (0, 0, None)), (1, 1, None))(X, Y, gauss)\n\n if fig is None:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n\n ls = LightSource(azdeg=30, altdeg=55)\n cmap_color = ls.shade(np.array(Z), cmap=plt.cm.gray, blend_mode=\"hsv\", norm=colors.PowerNorm(0))\n ax.plot_surface(Y, X, Z, antialiased=True, facecolors=cmap_color, rcount=200, ccount=200)\n ax.set_aspect(\"auto\")\n sns.despine()\n ax.view_init(elev=30, azim=55)\n ax.set_xlabel(\"$y_1$\")\n ax.set_ylabel(\"$y_2$\")\n ax.zaxis.set_rotate_label(False)\n ax.set_zlabel(\"$p(y_1, y_2)$\", rotation=90, labelpad=6)\n plt.title(gauss, fontweight=\"bold\")\n plt.draw()\n savefig(\"gaussPlot2dDemoSurf{}_latexified\".format(gauss))\n plt.show()\n\n\n# plotting for different gaussians\nfor gauss in gaussians:\n latexify(width_scale_factor=3, fig_height=1.5)\n make_contour_plot(gauss, fig=None, ax=None)\n latexify(width_scale_factor=1.5, fig_height=3)\n make_surface_plot(gauss, fig=None, ax=None)",
"_____no_output_____"
],
[
"# Plotting contour subplots for colorbars for fig 3.6\nlatexify(width_scale_factor=0.5, fig_height=3.5)\nfig, axes = plt.subplots(nrows=1, ncols=3)\nind = 0\nfor ax in axes.flat:\n gauss = gaussians[ind]\n ind += 1\n Z = jax.vmap(jax.vmap(gaussian_pdf, (0, 0, None)), (1, 1, None))(X, Y, gauss)\n im = ax.contour(Y, X, Z)\n ax.axis(\"equal\")\n ax.set_xlabel(\"$y_1$\")\n ax.set_ylabel(\"$y_2$\")\n ax.set_title(gauss, fontsize=9)\n\nfig.subplots_adjust(right=0.8, hspace=0.5)\ncbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\ncbar = fig.colorbar(im, cax=cbar_ax)\ncbar.set_label(\"$p(y_1, y_2)$\")\nsns.despine()\nplt.draw()\nsavefig(\"gaussPlot2dDemoContour_latexified\", tight_layout=False)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e74e990aa5457bb59e12bfbd20460b47f5928ac6 | 559,920 | ipynb | Jupyter Notebook | Chapter03/Exercise02/Exercise02.ipynb | MaheshPackt/Data-Science-Projects-2nd | 400f575c90945227fc74c7bb07cc7c7b1e413969 | [
"MIT"
] | null | null | null | Chapter03/Exercise02/Exercise02.ipynb | MaheshPackt/Data-Science-Projects-2nd | 400f575c90945227fc74c7bb07cc7c7b1e413969 | [
"MIT"
] | null | null | null | Chapter03/Exercise02/Exercise02.ipynb | MaheshPackt/Data-Science-Projects-2nd | 400f575c90945227fc74c7bb07cc7c7b1e413969 | [
"MIT"
] | null | null | null | 1,227.894737 | 189,072 | 0.956487 | [
[
[
"import numpy as np #numerical computation\nimport pandas as pd #data wrangling\nimport matplotlib.pyplot as plt #plotting package\n#Next line helps with rendering plots\n%matplotlib inline\nimport matplotlib as mpl #add'l plotting functionality\nimport seaborn as sns #a fancy plotting package\nmpl.rcParams['figure.dpi'] = 400 #high res figures",
"_____no_output_____"
]
],
[
[
"Load data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../../Data/Chapter_1_cleaned_data.csv')",
"_____no_output_____"
],
[
"features_response = df.columns.tolist()",
"_____no_output_____"
],
[
"items_to_remove = ['ID', 'SEX',\n 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6',\n 'EDUCATION_CAT',\n 'graduate school', 'high school', 'none',\n 'others', 'university']",
"_____no_output_____"
],
[
"features_response = [item for item in features_response if item not in items_to_remove]\nfeatures_response",
"_____no_output_____"
],
[
"X = df[features_response].iloc[:,:-1].values\ny = df[features_response].iloc[:,-1].values\nprint(X.shape, y.shape)",
"(26664, 17) (26664,)\n"
]
],
[
[
"# Exercise 3.02: Visualizing the Relationship Between Features and Response",
"_____no_output_____"
]
],
[
[
"overall_default_rate = df['default payment next month'].mean()\noverall_default_rate",
"_____no_output_____"
],
[
"group_by_pay_mean_y = df.groupby('PAY_1').agg(\n {'default payment next month':np.mean})\ngroup_by_pay_mean_y",
"_____no_output_____"
],
[
"axes = plt.axes()\naxes.axhline(overall_default_rate, color='red')\ngroup_by_pay_mean_y.plot(marker='x', legend=False, ax=axes)\naxes.set_ylabel('Proportion of credit defaults')\naxes.legend(['Entire dataset', 'Groups of PAY_1'])",
"_____no_output_____"
],
[
"pos_mask = y == 1\nneg_mask = y == 0",
"_____no_output_____"
],
[
"axes = plt.axes()\naxes.hist(df.loc[neg_mask, 'LIMIT_BAL'],\n edgecolor='black', color='white')\naxes.hist(df.loc[pos_mask, 'LIMIT_BAL'],\n alpha=0.5, edgecolor=None, color='black')\naxes.tick_params(axis='x', labelrotation=45)\naxes.set_xlabel('Credit limit (NT$)')\naxes.set_ylabel('Number of accounts')\naxes.legend(['Not defaulted', 'Defaulted'])\naxes.set_title('Credit limits by response variable')",
"_____no_output_____"
],
[
"df['LIMIT_BAL'].max()",
"_____no_output_____"
],
[
"bin_edges = list(range(0,850000,50000))\nprint(bin_edges)",
"[0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, 800000]\n"
],
[
"mpl.rcParams['figure.dpi'] = 400 \naxes = plt.axes()\naxes.hist(\n df.loc[neg_mask, 'LIMIT_BAL'],\n bins=bin_edges, density=True,\n edgecolor='black', color='white')\naxes.hist(\n df.loc[pos_mask, 'LIMIT_BAL'],\n bins=bin_edges, density=True, alpha=0.5,\n edgecolor=None, color='black')\naxes.tick_params(axis='x', labelrotation=45)\naxes.set_xlabel('Credit limit (NT$)')\naxes.set_ylabel('Proportion of accounts')\ny_ticks = axes.get_yticks()\naxes.set_yticklabels(np.round(y_ticks*50000,2))\naxes.legend(['Not defaulted', 'Defaulted'])\naxes.set_title(\n 'Normalized distributions of credit limits by response variable')",
"<ipython-input-14-58af298f658f>:15: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes.set_yticklabels(np.round(y_ticks*50000,2))\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74e9fac34389e8465ef0e7c3c9aaa7005b6aec3 | 125,435 | ipynb | Jupyter Notebook | Predictive Modelling/Stock-Prediction/prediction.ipynb | mukherjeetejas/Machine-learning | 3babb8d83e88f5c0cbbba2804605465119cc0958 | [
"MIT"
] | 4 | 2020-10-12T18:54:54.000Z | 2020-12-16T04:26:10.000Z | Predictive Modelling/Stock-Prediction/prediction.ipynb | mukherjeetejas/Machine-learning | 3babb8d83e88f5c0cbbba2804605465119cc0958 | [
"MIT"
] | 11 | 2020-10-10T05:47:10.000Z | 2020-10-27T15:57:39.000Z | Predictive Modelling/Stock-Prediction/prediction.ipynb | mukherjeetejas/Machine-learning | 3babb8d83e88f5c0cbbba2804605465119cc0958 | [
"MIT"
] | 25 | 2020-10-01T00:02:49.000Z | 2021-10-05T17:30:41.000Z | 56.938266 | 22,384 | 0.652266 | [
[
[
"### Stock Market Prediction And Forecasting Using Stacked LSTM",
"_____no_output_____"
]
],
[
[
"### Keras and Tensorflow >2.0",
"_____no_output_____"
],
[
"### Data Collection\nimport pandas_datareader as pdr\nkey=\"\"",
"_____no_output_____"
],
[
"df = pdr.get_data_tiingo('AAPL', api_key='11dfb33f50f81bf08437b4bbf7619d48cad950ff')",
"_____no_output_____"
],
[
"df.to_csv('AAPL.csv')",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df=pd.read_csv('AAPL.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.tail()",
"_____no_output_____"
],
[
"df1=df.reset_index()['close']",
"_____no_output_____"
],
[
"df1",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.plot(df1)",
"_____no_output_____"
],
[
"### LSTM are sensitive to the scale of the data. so we apply MinMax scaler ",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"df1",
"_____no_output_____"
],
[
"from sklearn.preprocessing import MinMaxScaler\nscaler=MinMaxScaler(feature_range=(0,1))\ndf1=scaler.fit_transform(np.array(df1).reshape(-1,1))",
"_____no_output_____"
],
[
"print(df1)",
"[[0.10772378]\n [0.10567818]\n [0.10214788]\n ...\n [0.98568082]\n [0.92724933]\n [0.92421393]]\n"
],
[
"##splitting dataset into train and test split\ntraining_size=int(len(df1)*0.65)\ntest_size=len(df1)-training_size\ntrain_data,test_data=df1[0:training_size,:],df1[training_size:len(df1),:1]",
"_____no_output_____"
],
[
"training_size,test_size",
"_____no_output_____"
],
[
"train_data",
"_____no_output_____"
],
[
"import numpy\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, time_step=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-time_step-1):\n\t\ta = dataset[i:(i+time_step), 0] ###i=0, 0,1,2,3-----99 100 \n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + time_step, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)",
"_____no_output_____"
],
[
"# reshape into X=t,t+1,t+2,t+3 and Y=t+4\ntime_step = 100\nX_train, y_train = create_dataset(train_data, time_step)\nX_test, ytest = create_dataset(test_data, time_step)",
"_____no_output_____"
],
[
"print(X_train.shape), print(y_train.shape)",
"(716, 100)\n(716,)\n"
],
[
"print(X_test.shape), print(ytest.shape)",
"(339, 100)\n(339,)\n"
],
[
"# reshape input to be [samples, time steps, features] which is required for LSTM\nX_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)\nX_test = X_test.reshape(X_test.shape[0],X_test.shape[1] , 1)",
"_____no_output_____"
],
[
"### Create the Stacked LSTM model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM",
"_____no_output_____"
],
[
"model=Sequential()\nmodel.add(LSTM(50,return_sequences=True,input_shape=(100,1)))\nmodel.add(LSTM(50,return_sequences=True))\nmodel.add(LSTM(50))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error',optimizer='adam')\n",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm (LSTM) (None, 100, 50) 10400 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 100, 50) 20200 \n_________________________________________________________________\nlstm_2 (LSTM) (None, 50) 20200 \n_________________________________________________________________\ndense (Dense) (None, 1) 51 \n=================================================================\nTotal params: 50,851\nTrainable params: 50,851\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm (LSTM) (None, 100, 50) 10400 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 100, 50) 20200 \n_________________________________________________________________\nlstm_2 (LSTM) (None, 50) 20200 \n_________________________________________________________________\ndense (Dense) (None, 1) 51 \n=================================================================\nTotal params: 50,851\nTrainable params: 50,851\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.fit(X_train,y_train,validation_data=(X_test,ytest),epochs=50,batch_size=64,verbose=1)",
"Train on 716 samples, validate on 339 samples\nEpoch 1/50\n716/716 [==============================] - 3s 4ms/sample - loss: 4.9448e-04 - val_loss: 0.0035\nEpoch 2/50\n716/716 [==============================] - 3s 4ms/sample - loss: 4.5151e-04 - val_loss: 0.0040\nEpoch 3/50\n716/716 [==============================] - 3s 4ms/sample - loss: 4.1498e-04 - val_loss: 0.0037\nEpoch 4/50\n716/716 [==============================] - 3s 4ms/sample - loss: 4.1041e-04 - val_loss: 0.0042\nEpoch 5/50\n716/716 [==============================] - 3s 4ms/sample - loss: 4.0568e-04 - val_loss: 0.0041\nEpoch 6/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.9886e-04 - val_loss: 0.0040\nEpoch 7/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.9564e-04 - val_loss: 0.0039\nEpoch 8/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.8672e-04 - val_loss: 0.0032\nEpoch 9/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.8592e-04 - val_loss: 0.0034\nEpoch 10/50\n716/716 [==============================] - 4s 5ms/sample - loss: 3.7851e-04 - val_loss: 0.0028\nEpoch 11/50\n716/716 [==============================] - 4s 6ms/sample - loss: 3.9323e-04 - val_loss: 0.0027\nEpoch 12/50\n716/716 [==============================] - 5s 6ms/sample - loss: 4.3006e-04 - val_loss: 0.0032\nEpoch 13/50\n716/716 [==============================] - 3s 5ms/sample - loss: 3.6914e-04 - val_loss: 0.0032\nEpoch 14/50\n716/716 [==============================] - 4s 6ms/sample - loss: 3.5871e-04 - val_loss: 0.0031\nEpoch 15/50\n716/716 [==============================] - 5s 7ms/sample - loss: 3.5499e-04 - val_loss: 0.0025\nEpoch 16/50\n716/716 [==============================] - 4s 5ms/sample - loss: 3.5540e-04 - val_loss: 0.0032\nEpoch 17/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.4997e-04 - val_loss: 0.0029\nEpoch 18/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.7024e-04 - val_loss: 0.0039\nEpoch 19/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.5395e-04 - val_loss: 0.0042\nEpoch 20/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.7385e-04 - val_loss: 0.0029\nEpoch 21/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.3281e-04 - val_loss: 0.0029\nEpoch 22/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.3360e-04 - val_loss: 0.0023\nEpoch 23/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.4149e-04 - val_loss: 0.0030\nEpoch 24/50\n716/716 [==============================] - 3s 4ms/sample - loss: 3.2132e-04 - val_loss: 0.0021\nEpoch 25/50\n716/716 [==============================] - 3s 5ms/sample - loss: 3.0228e-04 - val_loss: 0.0024\nEpoch 26/50\n716/716 [==============================] - 2s 3ms/sample - loss: 3.0580e-04 - val_loss: 0.0023\nEpoch 27/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.9215e-04 - val_loss: 0.0021\nEpoch 28/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.9473e-04 - val_loss: 0.0025\nEpoch 29/50\n716/716 [==============================] - 2s 3ms/sample - loss: 3.2095e-04 - val_loss: 0.0016\nEpoch 30/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.9864e-04 - val_loss: 0.0025\nEpoch 31/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.9008e-04 - val_loss: 0.0018\nEpoch 32/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.8478e-04 - val_loss: 0.0022\nEpoch 33/50\n716/716 
[==============================] - 2s 3ms/sample - loss: 2.8346e-04 - val_loss: 0.0019\nEpoch 34/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.9143e-04 - val_loss: 0.0030\nEpoch 35/50\n716/716 [==============================] - 2s 3ms/sample - loss: 3.3980e-04 - val_loss: 0.0015\nEpoch 36/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.8088e-04 - val_loss: 0.0029\nEpoch 37/50\n716/716 [==============================] - 2s 3ms/sample - loss: 3.1108e-04 - val_loss: 0.0015\nEpoch 38/50\n716/716 [==============================] - 2s 3ms/sample - loss: 3.2297e-04 - val_loss: 0.0029\nEpoch 39/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.7122e-04 - val_loss: 0.0016\nEpoch 40/50\n716/716 [==============================] - 2s 3ms/sample - loss: 2.5393e-04 - val_loss: 0.0015\nEpoch 41/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.6456e-04 - val_loss: 0.0015\nEpoch 42/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.6086e-04 - val_loss: 0.0020\nEpoch 43/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.5284e-04 - val_loss: 0.0020\nEpoch 44/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.4220e-04 - val_loss: 0.0013\nEpoch 45/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.5537e-04 - val_loss: 0.0023\nEpoch 46/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.4997e-04 - val_loss: 0.0012\nEpoch 47/50\n716/716 [==============================] - 3s 4ms/sample - loss: 2.4346e-04 - val_loss: 0.0025\nEpoch 48/50\n716/716 [==============================] - 4s 6ms/sample - loss: 2.3854e-04 - val_loss: 0.0012\nEpoch 49/50\n716/716 [==============================] - 5s 6ms/sample - loss: 2.4129e-04 - val_loss: 0.0028\nEpoch 50/50\n716/716 [==============================] - 5s 7ms/sample - loss: 2.5235e-04 - val_loss: 0.0012\n"
],
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"tf.__version__",
"_____no_output_____"
],
[
"### Lets Do the prediction and check performance metrics\ntrain_predict=model.predict(X_train)\ntest_predict=model.predict(X_test)",
"_____no_output_____"
],
[
"##Transformback to original form\ntrain_predict=scaler.inverse_transform(train_predict)\ntest_predict=scaler.inverse_transform(test_predict)",
"_____no_output_____"
],
[
"### Calculate RMSE performance metrics\nimport math\nfrom sklearn.metrics import mean_squared_error\nmath.sqrt(mean_squared_error(y_train,train_predict))",
"_____no_output_____"
],
[
"### Test Data RMSE\nmath.sqrt(mean_squared_error(ytest,test_predict))",
"_____no_output_____"
],
[
"### Plotting \n# shift train predictions for plotting\nlook_back=100\ntrainPredictPlot = numpy.empty_like(df1)\ntrainPredictPlot[:, :] = np.nan\ntrainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict\n# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(df1)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(train_predict)+(look_back*2)+1:len(df1)-1, :] = test_predict\n# plot baseline and predictions\nplt.plot(scaler.inverse_transform(df1))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()",
"_____no_output_____"
],
[
"len(test_data)",
"_____no_output_____"
],
[
"x_input=test_data[341:].reshape(1,-1)\nx_input.shape\n",
"_____no_output_____"
],
[
"temp_input=list(x_input)\ntemp_input=temp_input[0].tolist()",
"_____no_output_____"
],
[
"temp_input",
"_____no_output_____"
],
[
"from numpy import array\nimport matplotlib.pyplot as plt\n\ndef prediction():\n # demonstrate prediction for next 10 days\n lst_output=[]\n n_steps=100\n i=0\n while(i<30):\n \n if(len(temp_input)>100):\n #print(temp_input)\n x_input=np.array(temp_input[1:])\n print(\"{} day input {}\".format(i,x_input))\n x_input=x_input.reshape(1,-1)\n x_input = x_input.reshape((1, n_steps, 1))\n #print(x_input)\n yhat = model.predict(x_input, verbose=0)\n print(\"{} day output {}\".format(i,yhat))\n temp_input.extend(yhat[0].tolist())\n temp_input=temp_input[1:]\n #print(temp_input)\n lst_output.extend(yhat.tolist())\n i=i+1\n else:\n x_input = x_input.reshape((1, n_steps,1))\n yhat = model.predict(x_input, verbose=0)\n print(yhat[0])\n temp_input.extend(yhat[0].tolist())\n print(len(temp_input))\n lst_output.extend(yhat.tolist())\n i=i+1\n \n day_new=np.arange(1,101)\n day_pred=np.arange(101,131)\n\n plt.plot(day_new,scaler.inverse_transform(df1[1158:]))\n plt.plot(day_pred,scaler.inverse_transform(lst_output))",
"_____no_output_____"
],
[
" plt.plot(day_new,scaler.inverse_transform(df1[1158:]))\n plt.plot(day_pred,scaler.inverse_transform(lst_output))",
"_____no_output_____"
],
[
"df3=df1.tolist()\ndf3.extend(lst_output)\nplt.plot(df3[1200:])",
"_____no_output_____"
],
[
"df3=scaler.inverse_transform(df3).tolist()",
"_____no_output_____"
],
[
"plt.plot(df3)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74ea4898e675db44efd65f71c91c87a72278559 | 11,293 | ipynb | Jupyter Notebook | quests/serverlessml/07_caip/solution/export_data.ipynb | jonesevan007/training-data-analyst | 774446719316599cf221bdc5a67b00ec4c0b3ad0 | [
"Apache-2.0"
] | 2 | 2019-11-10T04:09:25.000Z | 2019-11-16T14:55:13.000Z | quests/serverlessml/07_caip/solution/export_data.ipynb | jonesevan007/training-data-analyst | 774446719316599cf221bdc5a67b00ec4c0b3ad0 | [
"Apache-2.0"
] | 10 | 2019-11-20T07:24:52.000Z | 2022-03-12T00:06:02.000Z | quests/serverlessml/07_caip/solution/export_data.ipynb | jonesevan007/training-data-analyst | 774446719316599cf221bdc5a67b00ec4c0b3ad0 | [
"Apache-2.0"
] | 4 | 2020-05-15T06:23:05.000Z | 2021-12-20T06:00:15.000Z | 29.180879 | 313 | 0.543434 | [
[
[
"# Exporting data from BigQuery to Google Cloud Storage\n\nIn this notebook, we export BigQuery data to GCS so that we can reuse our Keras model that was developed on CSV data.",
"_____no_output_____"
]
],
[
[
"%%bash\nexport PROJECT=$(gcloud config list project --format \"value(core.project)\")\necho \"Your current GCP Project Name is: \"$PROJECT",
"Your current GCP Project Name is: qwiklabs-gcp-bdc77450c97b4bf6\n"
],
[
"import os\n\nPROJECT = \"your-gcp-project-here\" # REPLACE WITH YOUR PROJECT NAME\nREGION = \"us-central1\" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"REGION\"] = REGION\nos.environ[\"BUCKET\"] = PROJECT + '-ml' # DEFAULT BUCKET WILL BE PROJECT ID -ml\n\nif PROJECT == \"your-gcp-project-here\":\n print(\"Don't forget to update your PROJECT name! Currently:\", PROJECT)",
"_____no_output_____"
]
],
[
[
"### Create BigQuery dataset and GCS Bucket\n\nIf you haven't already, create the the BigQuery dataset and GCS Bucket we will need.",
"_____no_output_____"
]
],
[
[
"%%bash\n \n## Create a BigQuery dataset for serverlessml if it doesn't exist\ndatasetexists=$(bq ls -d | grep -w serverlessml)\n\nif [ -n \"$datasetexists\" ]; then\n echo -e \"BigQuery dataset already exists, let's not recreate it.\"\n\nelse\n echo \"Creating BigQuery dataset titled: serverlessml\"\n \n bq --location=US mk --dataset \\\n --description 'Taxi Fare' \\\n $PROJECT:serverlessml\n echo \"\\nHere are your current datasets:\"\n bq ls\nfi \n \n## Create new ML GCS bucket if it doesn't exist already...\nexists=$(gsutil ls -d | grep -w gs://${PROJECT}-ml/)\n\nif [ -n \"$exists\" ]; then\n echo -e \"Bucket exists, let's not recreate it.\"\n \nelse\n echo \"Creating a new GCS bucket.\"\n gsutil mb -l ${REGION} gs://${PROJECT}-ml\n echo -e \"\\nHere are your current buckets:\"\n gsutil ls\nfi",
"BigQuery dataset already exists, let's not recreate it.\nBucket exists, let's not recreate it.\n"
]
],
[
[
"## Create BigQuery tables",
"_____no_output_____"
],
[
"Let's create a table with 1 million examples.\n\nNote that the order of columns is exactly what was in our CSV files.",
"_____no_output_____"
]
],
[
[
"%%bigquery\nCREATE OR REPLACE TABLE serverlessml.feateng_training_data AS\n\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_datetime,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n 'unused' AS key\nFROM `nyc-tlc.yellow.trips`\nWHERE MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 1000) = 1\nAND\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0",
"_____no_output_____"
]
],
[
[
"Make the validation dataset be 1/10 the size of the training dataset.",
"_____no_output_____"
]
],
[
[
"%%bigquery\nCREATE OR REPLACE TABLE serverlessml.feateng_valid_data AS\n\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_datetime,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n 'unused' AS key\nFROM `nyc-tlc.yellow.trips`\nWHERE MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 10000) = 2\nAND\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0",
"_____no_output_____"
]
],
[
[
"## Export the tables as CSV files\n\nChange the BUCKET variable below to match a bucket that you own.",
"_____no_output_____"
]
],
[
[
"%%bash\nOUTDIR=gs://$BUCKET/quests/serverlessml/data\necho \"Deleting current contents of $OUTDIR\"\ngsutil -m -q rm -rf $OUTDIR\n\necho \"Extracting training data to $OUTDIR\"\nbq --location=US extract \\\n --destination_format CSV \\\n --field_delimiter \",\" --noprint_header \\\n serverlessml.feateng_training_data \\\n $OUTDIR/taxi-train-*.csv\n\necho \"Extracting validation data to $OUTDIR\"\nbq --location=US extract \\\n --destination_format CSV \\\n --field_delimiter \",\" --noprint_header \\\n serverlessml.feateng_valid_data \\\n $OUTDIR/taxi-valid-*.csv\n\ngsutil ls -l $OUTDIR",
"Deleting current contents of gs://qwiklabs-gcp-bdc77450c97b4bf6-ml/quests/serverlessml/data\nExtracting training data to gs://qwiklabs-gcp-bdc77450c97b4bf6-ml/quests/serverlessml/data\n\nExtracting validation data to gs://qwiklabs-gcp-bdc77450c97b4bf6-ml/quests/serverlessml/data\n\n 88345235 2019-09-23T03:22:05Z gs://qwiklabs-gcp-bdc77450c97b4bf6-ml/quests/serverlessml/data/taxi-train-000000000000.csv\n 8725746 2019-09-23T03:22:15Z gs://qwiklabs-gcp-bdc77450c97b4bf6-ml/quests/serverlessml/data/taxi-valid-000000000000.csv\nTOTAL: 2 objects, 97070981 bytes (92.57 MiB)\n"
],
[
"!gsutil cat gs://$BUCKET/quests/serverlessml/data/taxi-train-000000000000.csv | head -2",
"52,2015-02-07 23:10:27 UTC,-73.781852722167969,40.644840240478516,-73.967453002929688,40.771881103515625,2,unused\n57.33,2015-02-15 12:22:12 UTC,-73.98321533203125,40.738700866699219,-73.78955078125,40.642852783203125,2,unused\n"
]
],
[
[
"Copyright 2019 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e74ebc75040502f3f3e64f4d2a4e0cc18000c08c | 256,105 | ipynb | Jupyter Notebook | Lineal systems 1.ipynb | DavidHdezU/DifferentialEquations | f7fecbe0fd7eee79d410b7b7a7fa5cedfcd5d9d6 | [
"MIT"
] | null | null | null | Lineal systems 1.ipynb | DavidHdezU/DifferentialEquations | f7fecbe0fd7eee79d410b7b7a7fa5cedfcd5d9d6 | [
"MIT"
] | null | null | null | Lineal systems 1.ipynb | DavidHdezU/DifferentialEquations | f7fecbe0fd7eee79d410b7b7a7fa5cedfcd5d9d6 | [
"MIT"
] | null | null | null | 483.216981 | 70,660 | 0.947924 | [
[
[
"# Lineal Systems pt. 1",
"_____no_output_____"
],
[
"## First Execersice\n$$y^{'}= Ay$$\n$$A = \\begin{bmatrix}\n-1 & 1\\\\\n-5 & -5\n\\end{bmatrix} $$\n\n$\\textit{Initial Value Problem}$ \n$$y(0) = \\begin{bmatrix}\n1\\\\\n5\n\\end{bmatrix} $$",
"_____no_output_____"
]
],
[
[
"from DE import lineal_system",
"_____no_output_____"
],
[
"from scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"IVP = [1, -5]",
"_____no_output_____"
],
[
"# Time line\nt = np.linspace(0, 30, 60) ",
"_____no_output_____"
],
[
"# solve ODEs\nx1, y1, x2, y2 = -1, 1, -5, -5 \ny = odeint(lineal_system,IVP,t, args=(x1, y1, x2, y2,))",
"_____no_output_____"
],
[
"# Plot x-axis\nx_axis = y[:,0]\nplt.semilogy(t, x_axis)\nplt.xlabel('time')\nplt.ylabel('x_axis')\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
],
[
"# Plot y-axis\ny_axis = y[:,1]\nplt.semilogy(t, y_axis)\nplt.xlabel('time')\nplt.ylabel('y_axis')\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
],
[
"# Plot both\n# plot results\nplt.plot(t,x_axis,'r-',linewidth=2,label='x-axis')\nplt.plot(t,y_axis,'b-',linewidth=2,label='y_axis')\nplt.xlabel('time')\nplt.show()",
"_____no_output_____"
],
[
"# Slope fields\n# Solution curve\n\n# Vector field\nX, Y = np.meshgrid(np.linspace(-10, 10, 20), np.linspace(-10, 10, 20))\nU = x1*X + y1*Y\nV = x2*X + y2*Y\n# Normalize arrows\nN = np.sqrt(U ** 2 + V ** 2)\nU = U / N\nV = V / N\nplt.quiver(X, Y, U, V, angles=\"xy\")\nplt.plot(y[:, 0], y[:, 1], \"-\")\n\nplt.xlim([-10, 10])\nplt.ylim([-10, 10])\nplt.xlabel(r\"$x$\")\nplt.ylabel(r\"$y$\")",
"_____no_output_____"
]
],
[
[
"## Second Execersice\n$$y^{'}= Ay$$\n$$A = \\begin{bmatrix}\n-5 & 1\\\\\n-2 & -2\n\\end{bmatrix} $$\n\n$\\textit{Initial Value Problem}$ \n$$y(0) = \\begin{bmatrix}\n0\\\\\n-1\n\\end{bmatrix} $$",
"_____no_output_____"
]
],
[
[
"IVP = [0, -1]",
"_____no_output_____"
],
[
"# Time line\nt = np.linspace(0, 30, 60)",
"_____no_output_____"
],
[
"# solve system\nx1, y1, x2, y2 = -5, 1, -2, -2 \ny = odeint(lineal_system,IVP,t, args=(x1, y1, x2, y2,))",
"_____no_output_____"
],
[
"# Plot x-axis\nx_axis = y[:,0]\nplt.semilogy(t, x_axis)\nplt.xlabel('time')\nplt.ylabel('x_axis')\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
],
[
"# Plot y-axis\ny_axis = y[:,1]\nplt.semilogy(t, y_axis)\nplt.xlabel('time')\nplt.ylabel('y_axis')\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
],
[
"# Plot both\n# plot results\nplt.plot(t,x_axis,'r-',linewidth=2,label='x-axis')\nplt.plot(t,y_axis,'b-',linewidth=2,label='y_axis')\nplt.xlabel('time')\nplt.show()",
"_____no_output_____"
],
[
"# Slope fields\n# Solution curve\n\n# Vector field\nX, Y = np.meshgrid(np.linspace(-5, 5, 20), np.linspace(-5, 5, 20))\nU = x1*X + y1*Y\nV = x2*X + y2*Y\n# Normalize arrows\nN = np.sqrt(U ** 2 + V ** 2)\nU = U / N\nV = V / N\nplt.quiver(X, Y, U, V, angles=\"xy\")\nplt.plot(y[:, 0], y[:, 1], \"-\")\n\nplt.xlim([-5, 5])\nplt.ylim([-5, 5])\nplt.xlabel(r\"$x$\")\nplt.ylabel(r\"$y$\")",
"_____no_output_____"
]
],
[
[
"## See multiple curve solutions",
"_____no_output_____"
]
],
[
[
"\n# Slope fields\n# Solution curve\n\n# Vector field\nX, Y = np.meshgrid(np.linspace(-10, 10, 20), np.linspace(-10, 10, 20))\nU = x1*X + y1*Y\nV = x2*X + y2*Y\n# Normalize arrows\nN = np.sqrt(U ** 2 + V ** 2)\nU = U / N\nV = V / N\nplt.quiver(X, Y, U, V, angles=\"xy\")\n\nfor y0 in np.linspace(-5.0, 0.0, 10):\n y_initial = [y0, -10.0]\n y = odeint(lineal_system,y_initial,t, args=(x1, y1, x2, y2,))\n plt.plot(y[:, 0], y[:, 1], \"-\")\nplt.xlim([-10, 10])\nplt.ylim([-10, 10])\nplt.xlabel(r\"$x$\")\nplt.ylabel(r\"$y$\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74ec2e41fbfb4c32cae5beb6b2221bafb1c5af6 | 10,436 | ipynb | Jupyter Notebook | public/data/Wrangling Hits.ipynb | domluna/mlbhits | 50caed6b8a64d38100b128830ff9f2dff8472ab7 | [
"MIT"
] | 2 | 2015-11-02T19:35:10.000Z | 2017-03-05T00:49:36.000Z | public/data/Wrangling Hits.ipynb | domluna/mlbhits | 50caed6b8a64d38100b128830ff9f2dff8472ab7 | [
"MIT"
] | null | null | null | public/data/Wrangling Hits.ipynb | domluna/mlbhits | 50caed6b8a64d38100b128830ff9f2dff8472ab7 | [
"MIT"
] | null | null | null | 28.12938 | 132 | 0.348314 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74ec5294c9f48637437629090a7ddfd6a174b22 | 9,296 | ipynb | Jupyter Notebook | notebooks/Make custom Process and add to Pipeline.ipynb | free-variation/cltk | 3af75e0e1075355c456ee815510791262005b460 | [
"MIT"
] | null | null | null | notebooks/Make custom Process and add to Pipeline.ipynb | free-variation/cltk | 3af75e0e1075355c456ee815510791262005b460 | [
"MIT"
] | null | null | null | notebooks/Make custom Process and add to Pipeline.ipynb | free-variation/cltk | 3af75e0e1075355c456ee815510791262005b460 | [
"MIT"
] | null | null | null | 33.2 | 358 | 0.530658 | [
[
[
"This notebook shows how to wrap a function with a `Process`, then to call it in a `Pipeline`",
"_____no_output_____"
],
[
"# Make a new `Process`\n\nTo understand how a `Process` works, we will create a new one here. We will make one specific for transliteration, then subclass that for a particular language.",
"_____no_output_____"
]
],
[
[
"from cltk.core.data_types import Process",
"_____no_output_____"
],
[
"# this code in the CLTK takes the Anglo-Saxon runic alphabet and turns it into the Latin alphabet\nfrom cltk.phonology.ang.transliteration import Transliterate",
"_____no_output_____"
],
[
"oe_runes = \"ᚩᚠᛏ ᛋᚳᚣᛚᛞ ᛋᚳᛖᚠᛁᛝ ᛋᚳᛠᚦᛖᚾᚪ ᚦᚱᛠᛏᚢᛗ\" # type str\noe_latin = Transliterate().transliterate(text=oe_runes, mode=\"Latin\") # type str\nprint(oe_latin)",
"oft scyld scefin sceathena threatum\n"
],
[
"from dataclasses import dataclass\nfrom copy import deepcopy\nfrom boltons.cacheutils import cachedproperty\nfrom cltk.core.exceptions import CLTKException\nfrom cltk.core.data_types import Doc, Word",
"_____no_output_____"
],
[
"@dataclass\nclass OldEnglishTransliterationProcess(Process):\n \"\"\"A simple ``Process`` for transliteration of \n Old English in the runic alphabet.\n \"\"\"\n \n language: str = None\n\n @cachedproperty\n def algorithm(self):\n \"\"\"This is the algo to be sent over\"\"\"\n return Transliterate().transliterate\n\n def run(self, input_doc: Doc) -> Doc:\n output_doc = deepcopy(input_doc)\n \n for index, word_obj in enumerate(output_doc.words):\n oe_latin = self.algorithm(text=word_obj.string, mode=\"Latin\") # type str\n word_obj.phonetic_transcription = oe_latin\n output_doc.words[index] = word_obj\n\n return output_doc\n\noe_words = [Word(string=w) for w in oe_runes.split()]\ncltk_doc_oe = Doc(words=oe_words)\ntranslit_proc = OldEnglishTransliterationProcess\ncltk_doc_oe = translit_proc().run(input_doc=cltk_doc_oe)",
"_____no_output_____"
],
[
"# now you can see that a value has been added to Word.phonetic_transcription\nprint(cltk_doc_oe.words[0])",
"Word(index_char_start=None, index_char_stop=None, index_token=None, index_sentence=None, string='ᚩᚠᛏ', pos=None, lemma=None, stem=None, scansion=None, xpos=None, upos=None, dependency_relation=None, governor=None, features={}, category={}, embedding=None, stop=None, named_entity=None, syllables=None, phonetic_transcription='oft', definition=None)\n"
],
[
"print([(w.string, w.phonetic_transcription) for w in cltk_doc_oe.words])",
"[('ᚩᚠᛏ', 'oft'), ('ᛋᚳᚣᛚᛞ', 'scyld'), ('ᛋᚳᛖᚠᛁᛝ', 'scefin'), ('ᛋᚳᛠᚦᛖᚾᚪ', 'sceathena'), ('ᚦᚱᛠᛏᚢᛗ', 'threatum')]\n"
]
],
[
[
"Note that most ``Process``es in the CLTK library are more complex than this, as they allow for inheritance, which helps the project scale better. For instance:\n\n`Process` <--- `StemmingProcess` <--- {`LatinStemmingProcess`, `MiddleEnglishStemmingProcess`, `MiddleHighGermanStemmingProcess`, `OldFrenchStemmingProcess`}\n\nIn these cases, the separation of `algorithm` from `run` allows for different functions to be called for each language.",
"_____no_output_____"
],
[
"# Add a `Process` to a `Pipeline`\n\nEach `Process` takes a `Doc`, adds information to it and its `Word`s, then and returns the `Doc`. A `Process` like our new `OldEnglishTransliterationProcess` might belong at the end, so in the following we append it to the end of the `Pipeline`.",
"_____no_output_____"
]
],
[
[
"from cltk import NLP",
"_____no_output_____"
],
[
"# Load the Old English NLP class\ncltk_nlp = NLP(language=\"ang\")",
"𐤀 CLTK version 'cltk 1.0.0b10'.\nPipeline for language 'Old English (ca. 450-1100)' (ISO: 'ang'): `MultilingualTokenizationProcess`, `OldEnglishLemmatizationProcess`, `OldEnglishEmbeddingsProcess`, `StopsProcess`, `OldEnglishNERProcess`.\n"
],
[
"# Inspect the Pipline, which is contained in NLP\nfrom pprint import pprint\npprint(cltk_nlp.pipeline.processes)",
"[<class 'cltk.tokenizers.processes.MultilingualTokenizationProcess'>,\n <class 'cltk.lemmatize.processes.OldEnglishLemmatizationProcess'>,\n <class 'cltk.embeddings.processes.OldEnglishEmbeddingsProcess'>,\n <class 'cltk.stops.processes.StopsProcess'>,\n <class 'cltk.ner.processes.OldEnglishNERProcess'>]\n"
],
[
"# Add the new custom Process to the end\ncltk_nlp.pipeline.processes.append(OldEnglishTransliterationProcess)",
"_____no_output_____"
],
[
"# Now run the pipeline and see the results written to Word.phonetic_transcription\ncltk_doc = cltk_nlp.analyze(text=oe_runes)\nprint(cltk_doc.words[0])",
"Word(index_char_start=0, index_char_stop=3, index_token=0, index_sentence=None, string='ᚩᚠᛏ', pos=None, lemma='ᚩᚠᛏ', stem=None, scansion=None, xpos=None, upos=None, dependency_relation=None, governor=None, features={}, category={}, embedding=array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), stop=False, named_entity=False, syllables=None, phonetic_transcription='oft', definition=None)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74edb40e25ed080f307a1ed8c1d13cbb83523a8 | 8,501 | ipynb | Jupyter Notebook | examples/notebooks/leaflet.ipynb | buncis/pycall.rb | df21b838e6faa6834d9d0d0ecc1692c0e4f224f8 | [
"MIT"
] | 702 | 2017-09-08T05:21:44.000Z | 2022-02-02T21:18:38.000Z | examples/notebooks/leaflet.ipynb | buncis/pycall.rb | df21b838e6faa6834d9d0d0ecc1692c0e4f224f8 | [
"MIT"
] | 107 | 2017-09-08T04:56:15.000Z | 2022-03-11T17:57:25.000Z | examples/notebooks/leaflet.ipynb | buncis/pycall.rb | df21b838e6faa6834d9d0d0ecc1692c0e4f224f8 | [
"MIT"
] | 54 | 2017-09-08T16:16:30.000Z | 2022-03-04T13:51:24.000Z | 108.987179 | 6,914 | 0.718621 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74eded6880359db29d73b41faea92b2ff5554e8 | 658,717 | ipynb | Jupyter Notebook | vg_vis_rels.ipynb | xujing113221/ContrastiveLosses4VRD | f7539ca1bda75ef7a6bf23713b927b4f19607813 | [
"MIT"
] | 195 | 2019-03-19T01:12:15.000Z | 2022-03-28T08:27:58.000Z | vg_vis_rels.ipynb | xujing113221/ContrastiveLosses4VRD | f7539ca1bda75ef7a6bf23713b927b4f19607813 | [
"MIT"
] | 30 | 2019-06-05T13:13:28.000Z | 2022-02-08T04:56:59.000Z | vg_vis_rels.ipynb | xujing113221/ContrastiveLosses4VRD | f7539ca1bda75ef7a6bf23713b927b4f19607813 | [
"MIT"
] | 52 | 2019-03-18T22:23:43.000Z | 2022-03-21T07:51:28.000Z | 1,719.887728 | 647,084 | 0.958626 | [
[
[
"import os\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport numpy as np\nfrom six.moves import cPickle as pickle\nimport json\nfrom tqdm import tqdm",
"_____no_output_____"
],
[
"# VG\ndir_path = 'Outputs/vg_X-101-64x4d-FPN/'",
"_____no_output_____"
],
[
"topk_dets_file = os.path.join(dir_path, 'rel_detections_topk.pkl')\nwith open(topk_dets_file, 'rb') as f:\n topk_dets = pickle.load(f)",
"_____no_output_____"
],
[
"print(len(topk_dets))\nprint(topk_dets[0].keys())\nprint(topk_dets[0]['det_boxes_o_top'].shape)\nprint(topk_dets[0]['det_scores_top'].shape)\nmin_len = 100\nfor det in topk_dets:\n if min_len > det['det_scores_top'].shape[0]:\n min_len = det['det_scores_top'].shape[0]\nprint(min_len)",
"26446\ndict_keys(['image', 'det_boxes_s_top', 'det_boxes_o_top', 'det_labels_s_top', 'det_labels_p_top', 'det_labels_o_top', 'det_scores_top', 'gt_boxes_sbj', 'gt_boxes_obj', 'gt_labels_sbj', 'gt_labels_obj', 'gt_labels_prd'])\n(100, 4)\n(100,)\n100\n"
],
[
"# VG\nimg_path = 'data/vg/VG_100K/'\nwith open('data/vg/objects.json') as f:\n obj_cats = json.load(f)\nwith open('data/vg/predicates.json') as f:\n prd_cats = json.load(f)",
"_____no_output_____"
],
[
"def box_overlap(box1, box2):\n overlap = 0.0\n box_area = (\n (box2[2] - box2[0] + 1) *\n (box2[3] - box2[1] + 1)\n )\n iw = (\n min(box1[2], box2[2]) -\n max(box1[0], box2[0]) + 1\n )\n if iw > 0:\n ih = (\n min(box1[3], box2[3]) -\n max(box1[1], box2[1]) + 1\n )\n if ih > 0:\n ua = float(\n (box1[2] - box1[0] + 1) *\n (box1[3] - box1[1] + 1) +\n box_area - iw * ih\n )\n overlap = iw * ih / ua\n return overlap\n\n# box1 and box2 are in [x1, y1. w. h] format\ndef box_union(box1, box2):\n xmin = min(box1[0], box2[0])\n ymin = min(box1[1], box2[1])\n xmax = max(box1[0] + box1[2] - 1, box2[0] + box2[2] - 1)\n ymax = max(box1[1] + box1[3] - 1, box2[1] + box2[3] - 1)\n return [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1]\n\ndef box2rect(img, box):\n x = box[0] + edge_width / 2\n y = box[1] + edge_width / 2\n w = box[2] - box[0] - edge_width\n h = box[3] - box[1] - edge_width\n return x, y, w, h",
"_____no_output_____"
],
[
"edge_width = 3\nfont_size = 18\ntopk = 50 # 100\nscore_thr = 0.0 # 0.05\n\nsave_output = False\n\nind = np.random.randint(0, len(topk_dets))\nprint('ind: ', ind)\ndet = topk_dets[ind]\n\nsbj_boxes = det['det_boxes_s_top']\nsbj_labels = det['det_labels_s_top']\nobj_boxes = det['det_boxes_o_top']\nobj_labels = det['det_labels_o_top']\nprd_labels = det['det_labels_p_top']\ndet_scores = det['det_scores_top']\n\nimg_name = det['image'].split('/')[-1]\nprint('image: ', img_name)\nprint('topk: ', topk)\nprint('sbj_labels.shape[0]: ', sbj_labels.shape[0])\n\nimg = mpimg.imread(img_path + img_name)\n\nfig = plt.figure(figsize=(18, 12))\nax = plt.gca()\nplt.imshow(img)\nplt.axis('off')\ndet_title = plt.title('det')\nplt.setp(det_title, color='b')\nfor j in range(min(topk, sbj_labels.shape[0])):\n # det\n det_score = det_scores[j]\n if det_score < score_thr:\n continue\n sbj_label = sbj_labels[j]\n obj_label = obj_labels[j]\n prd_label = prd_labels[j]\n sbj_box = sbj_boxes[j]\n obj_box = obj_boxes[j]\n s_name = obj_cats[sbj_label]\n o_name = obj_cats[obj_label]\n p_name = prd_cats[prd_label]\n \n s_x, s_y, s_w, s_h = box2rect(img, sbj_box)\n s_cx = s_x + s_w // 2\n s_cy = s_y + s_h // 2\n ax.text(s_cx, s_cy - 2,\n s_name,\n fontsize=font_size,\n color='white',\n bbox=dict(facecolor='orange', alpha=0.5, pad=0, edgecolor='none'))\n \n o_x, o_y, o_w, o_h = box2rect(img, obj_box)\n o_cx = o_x + o_w // 2\n o_cy = o_y + o_h // 2\n ax.text(o_cx, o_cy - 2,\n o_name,\n fontsize=font_size,\n color='white',\n bbox=dict(facecolor='blue', alpha=0.5, pad=0, edgecolor='none'))\n \n rel_l = lines.Line2D([s_cx, o_cx], [s_cy, o_cy], color='purple', linewidth=edge_width)\n ax.add_line(rel_l)\n ax.text((s_cx + o_cx) / 2, (s_cy + o_cy) / 2,\n p_name,\n fontsize=font_size,\n color='white',\n bbox=dict(facecolor='purple', alpha=0.5, pad=0, edgecolor='none'))\n\nif save_output:\n output_dir = os.path.join(dir_path, img_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n plt.savefig(os.path.join(output_dir, 'all_true_pos.jpg'), bbox_inches='tight')\nplt.show()\nplt.close(fig)\n\n# print names and scores\nfor j in range(min(topk, sbj_labels.shape[0])):\n # det\n det_score = det_scores[j]\n if det_score < score_thr:\n continue\n sbj_label = sbj_labels[j]\n obj_label = obj_labels[j]\n prd_label = prd_labels[j]\n s_name = obj_cats[sbj_label]\n o_name = obj_cats[obj_label]\n p_name = prd_cats[prd_label]\n print('{}: {} {} {}'.format(j, s_name, p_name, o_name))\n print('\\t\\ttotal score:\\t {:.6f}'.format(det_score))",
"ind: 6032\nimage: 2322669.jpg\ntopk: 50\nsbj_labels.shape[0]: 100\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74eeaef8ceed414e3e8fd71bc56d623fc6120a0 | 51,295 | ipynb | Jupyter Notebook | docs_src/vision.learner.ipynb | Gokkulnath/fastai_v1 | 5a24c6ebd42223d37e90463f69d32b4a52b6895c | [
"Apache-2.0"
] | 115 | 2018-07-11T06:21:32.000Z | 2018-09-25T09:16:44.000Z | docs_src/vision.learner.ipynb | rsaxby/fastai_old | c8285fbc246f41066da02a74b86c917923892ba8 | [
"Apache-2.0"
] | 24 | 2018-07-10T22:18:05.000Z | 2018-09-22T00:26:54.000Z | docs_src/vision.learner.ipynb | rsaxby/fastai_old | c8285fbc246f41066da02a74b86c917923892ba8 | [
"Apache-2.0"
] | 45 | 2018-07-10T22:17:23.000Z | 2018-09-25T08:42:28.000Z | 133.929504 | 22,972 | 0.855366 | [
[
[
"# Computer Vision Learner",
"_____no_output_____"
],
[
"[`vision.learner`](/text.learner.html#text.learner) is the module that defines the `Conv_Learner` class, to easily get a model suitable for transfer learning.",
"_____no_output_____"
]
],
[
[
"from fastai.gen_doc.nbdoc import *\nfrom fastai.vision import *\nfrom fastai import *\nfrom fastai.docs import *",
"_____no_output_____"
]
],
[
[
"## Transfer learning",
"_____no_output_____"
],
[
"Transfer learning is a technique where you use a model trained on a very large dataset (usually [ImageNet](http://image-net.org/) in computer vision) and then adapt it to your own dataset. The idea is that it has learned to recognize many features on all of this data, and that you will benefit from this knowledge, especially if your dataset is small, compared to starting from a randomly initiliazed model. It has been proved in [this article](https://arxiv.org/abs/1805.08974) on a wide range of tasks that transfer learning nearly always give better results.\n\nIn practice, you need to change the last part of your model to be adapted to your own number of classes. Most convolutional models end with a few linear layers (a part will call head). The last convolutional layer will have analyzed features in the image that went through the model, and the job of the head is to convert those in predictions for each of our classes. In transfer learning we will keep all the convolutional layers (called the body or the backbone of the model) with their weights pretrained on ImageNet but will define a new head initiliazed randomly.\n\nThen we will train the model we obtain in two phases: first we freeze the body weights and only train the head (to convert those analyzed features into predictions for our own data), then we unfreeze the layers of the backbone (gradually if necessary) and fine-tune the whole model (possily using differential learning rates).\n\nThe [`ConvLearner`](/vision.learner.html#ConvLearner) class helps you to automatically get a pretrained model from a given architecture with a custom head that is suitable for your data.",
"_____no_output_____"
]
],
[
[
"show_doc(ConvLearner, doc_string=False)",
"_____no_output_____"
]
],
[
[
"This class creates a [[[`Learner`](/basic_train.html#Learner)](/basic_train.html#Learner)](/basic_train.html#Learner) object from the [`data`](/text.data.html#text.data) object and model inferred from it with the backbone given in `arch`. Specifically, it will cut the model defined by `arch` (randomly initialized if `pretrained` is False) at the last convolutional layer by default (or as defined in `cut`, see below) and add:\n- an [`AdaptiveConcatPool2d`](/layers.html#AdaptiveConcatPool2d) layer,\n- a [`Flatten`](/layers.html#Flatten) layer,\n- blocks of \\[`nn.BatchNorm1d`, `nn.Dropout`, `nn.Linear`, `nn.ReLU`\\] layers.\n\nThe blocks are defined by the `lin_ftrs` and `ps` arguments. Specifically, the first block will have a number of inputs inferred from the backbone `arch` and the last one will have a number of outputs equal to `data.c` (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_frs` (of course a block has a number of inputs equal to the number of outputs of the previous block). The default is to have an intermediate hidden size of 512 (which makes two blocks `model_activation` -> 512 -> `n_classes`). If you pass a float then the final dropout layer will have the value `ps`, and the remaining will be `ps/2`. If you pass a list then the values are used for dropout probabilities directly.\n\nNote that the very last block doesn't have a `nn.ReLU` activation, to allow you to use any final activation you want (generally included in the loss function in pytorch). Also, the backbone will be frozen if you choose `pretrained=True` (so only the head will train if you call [`fit`](/basic_train.html#fit)) so that you can immediately start phase one of training as described above.\n\nAlternatively, you can define your own `custom_head` to put on top of the backbone. If you want to specify where to split `arch` you should so in the argument `cut` which can either be the index of a specific layer (the result will not include that layer) or a function that, when passed the model, will return the backbone you want.\n\nThe final model obtained by stacking the backbone and the head (custom or defined as we saw) is then separated in groups for gradual unfreezeing or differential learning rates. You can specify of to split the backbone in groups with the optional argument `split_on` (should be a function that returns those groups when given the backbone). \n\nThe `kwargs` will be passed on to [[[`Learner`](/basic_train.html#Learner)](/basic_train.html#Learner)](/basic_train.html#Learner), so you can put here anything that [[[`Learner`](/basic_train.html#Learner)](/basic_train.html#Learner)](/basic_train.html#Learner) will accept ([`metrics`](/metrics.html#metrics), `loss_fn`, `opt_fn`...)",
"_____no_output_____"
]
],
[
[
"untar_mnist()\ndata = image_data_from_folder(MNIST_PATH, ds_tfms=get_transforms(do_flip=False, max_warp=0), size=32)",
"_____no_output_____"
],
[
"learner = ConvLearner(data, tvm.resnet18, metrics=[accuracy])\nlearner.fit_one_cycle(1,1e-3)",
"_____no_output_____"
]
],
[
[
"### Customize your model",
"_____no_output_____"
],
[
"You can customize [`ConvLearner`](/vision.learner.html#ConvLearner) for your own models default `cut` and `split_on` functions by adding it them the dictionary `model_meta`. The key should be your model and the value should be a dictionary with the keys `cut` and `split_on` (see the source code for examples). The constructor will call [`create_body`](/vision.learner.html#create_body) and [`create_head`](/vision.learner.html#create_head) for you based on `cut`; you can also call them yourself, which is particularly useful for testing.",
"_____no_output_____"
]
],
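[
[
"A minimal sketch of such a registration, assuming `model_meta` from this module is in scope; the cut index `-2` and `_split_fn` below are illustrative placeholders, not library defaults.",
"_____no_output_____"
],
[
"# hypothetical registration sketch; -2 and _split_fn are placeholder assumptions\ndef _split_fn(m): return (m[0], m[1])\nmodel_meta[tvm.resnet34] = {'cut': -2, 'split_on': _split_fn}",
"_____no_output_____"
]
],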
[
[
"show_doc(create_body)",
"_____no_output_____"
],
[
"show_doc(create_head, doc_string=False)",
"_____no_output_____"
]
],
[
[
"Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `nc` classes. `ps` is the probability of the dropouts, as documented above in [`ConvLearner`](/vision.learner.html#ConvLearner).",
"_____no_output_____"
],
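[
"An illustrative call, using the parameter names (`nf`, `nc`, `lin_ftrs`, `ps`) taken from the description above; the values are arbitrary assumptions.",
"_____no_output_____"
],
[
"# illustrative only: a head for 1024 backbone features and 10 classes (parameter names assumed from the description above)\ncreate_head(nf=1024, nc=10, lin_ftrs=[512], ps=0.5)",
"_____no_output_____"
],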
[
"### Utility methods",
"_____no_output_____"
]
],
[
[
"show_doc(num_features)",
"_____no_output_____"
],
[
"show_doc(ClassificationInterpretation)",
"_____no_output_____"
]
],
[
[
"This provides a confusion matrix and visualization of the most incorrect images. Pass in your [`data`](/text.data.html#text.data), calculated `preds`, actual `y`, and the class of your loss function, and then use the methods below to view the model interpretation results. For instance:",
"_____no_output_____"
]
],
[
[
"learn = ConvLearner(get_mnist(), tvm.resnet18)\nlearn.fit(1)\npreds,y = learn.get_preds()\ninterp = ClassificationInterpretation(data, preds, y, loss_class=nn.CrossEntropyLoss)",
"_____no_output_____"
],
[
"show_doc(ClassificationInterpretation.plot_top_losses)",
"_____no_output_____"
]
],
[
[
"The `k` items are arranged as a square, so it will look best if `k` is a square number (4, 9, 16, etc). The title of each image shows: prediction, actual, loss, probability of actual class.",
"_____no_output_____"
]
],
[
[
"interp.plot_top_losses(9, figsize=(7,7))",
"_____no_output_____"
],
[
"show_doc(ClassificationInterpretation.top_losses)",
"_____no_output_____"
]
],
[
[
"Returns tuple of *(losses,indices)*.",
"_____no_output_____"
]
],
[
[
"interp.top_losses(9)",
"_____no_output_____"
],
[
"show_doc(ClassificationInterpretation.plot_confusion_matrix)",
"_____no_output_____"
],
[
"interp.plot_confusion_matrix()",
"_____no_output_____"
],
[
"show_doc(ClassificationInterpretation.confusion_matrix)",
"_____no_output_____"
],
[
"interp.confusion_matrix()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74ef911e7b2b762d46752e0c3b3e7d0bb67fa5d | 5,001 | ipynb | Jupyter Notebook | copernican/exploratory work.ipynb | ventureBorbot/Data-Analysis | a44122aeb489fd97488c105b951c06d01d2db894 | [
"MIT"
] | 4,358 | 2017-12-29T17:56:07.000Z | 2022-03-30T15:14:57.000Z | copernican/exploratory work.ipynb | ventureBorbot/Data-Analysis | a44122aeb489fd97488c105b951c06d01d2db894 | [
"MIT"
] | 61 | 2018-01-18T17:50:46.000Z | 2022-03-09T20:16:01.000Z | copernican/exploratory work.ipynb | ventureBorbot/Data-Analysis | a44122aeb489fd97488c105b951c06d01d2db894 | [
"MIT"
] | 3,689 | 2017-12-29T17:57:36.000Z | 2022-03-29T12:26:03.000Z | 29.245614 | 394 | 0.582883 | [
[
[
"# Introduction\nState notebook purpose here",
"_____no_output_____"
],
[
"### Imports\nImport libraries and write settings here.",
"_____no_output_____"
]
],
[
[
"# Data manipulation\nimport pandas as pd\nimport numpy as np\n\n# Options for pandas\npd.options.display.max_columns = 50\npd.options.display.max_rows = 30\n\n# Display all cell outputs\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = 'all'\n\nfrom IPython import get_ipython\nipython = get_ipython()\n\n# autoreload extension\nif 'autoreload' not in ipython.extension_manager.loaded:\n %load_ext autoreload\n\n%autoreload 2\n\n# Visualizations\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly.offline import iplot, init_notebook_mode\ninit_notebook_mode(connected=True)\n\nimport cufflinks as cf\ncf.go_offline(connected=True)\ncf.set_config_file(theme='white')",
"_____no_output_____"
]
],
[
[
"# Analysis/Modeling\nDo work here",
"_____no_output_____"
],
[
"# Results\nShow graphs and stats here",
"_____no_output_____"
],
[
"# Conclusion\nSummarize findings here",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e74ef9331eba8e0e4085c62c88fef49bb1684a43 | 26,613 | ipynb | Jupyter Notebook | 45_sympy/10_sympy.ipynb | kangwon-naver/nmisp | 141f8148b3ce783d3df27ee0c9986f530cada8fb | [
"BSD-3-Clause"
] | 7 | 2019-05-14T11:00:53.000Z | 2020-08-27T01:04:29.000Z | 45_sympy/10_sympy.ipynb | kangwon-naver/nmisp | 141f8148b3ce783d3df27ee0c9986f530cada8fb | [
"BSD-3-Clause"
] | 170 | 2018-07-12T06:06:21.000Z | 2022-01-28T09:06:55.000Z | 45_sympy/10_sympy.ipynb | kangwon-naver/nmisp | 141f8148b3ce783d3df27ee0c9986f530cada8fb | [
"BSD-3-Clause"
] | 57 | 2018-08-28T08:38:59.000Z | 2020-09-02T03:40:47.000Z | 17.081515 | 190 | 0.449329 | [
[
[
"# `sympy`\n\n",
"_____no_output_____"
],
[
"[`sympy`](https://www.sympy.org)는 *기호 처리기*로 숫자 대신 기호 연산을 지원한다..<br>\n[`sympy`](https://www.sympy.org), a *symbolic processor* supports operations in symbols instead of numbers.\n\n",
"_____no_output_____"
],
[
"2006년 이후 2019 까지 800명이 넘는 개발자가 작성한 코드를 제공하였다.<br>\nSince 2006, more than 800 developers contributed so far in 2019.\n\n",
"_____no_output_____"
],
[
"## 기호 연산 예<br>Examples of symbolic processing\n\n",
"_____no_output_____"
],
[
"`sympy` 모듈을 `sym` 라는 이름으로 불러온다.<br>Import `sympy` module in the name of `sym`.\n\n",
"_____no_output_____"
]
],
[
[
"import sympy as sym\nsym.init_printing()\n\n",
"_____no_output_____"
]
],
[
[
"비교를 위해 `numpy` 모듈도 불러온다.<br>\nImport `numpy` module to compare.\n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n",
"_____no_output_____"
],
[
"np.pi\n\n",
"_____no_output_____"
],
[
"sym.pi\n\n",
"_____no_output_____"
]
],
[
[
"#### 오일러 공식<br>Euler formula\n\n",
"_____no_output_____"
],
[
"$$\ne ^ {\\pi i} + 1 = 0\n$$",
"_____no_output_____"
]
],
[
[
"np.exp(np.pi * 1j) + 1\n\n",
"_____no_output_____"
],
[
"sym.exp(sym.pi * 1j) + 1\n\n",
"_____no_output_____"
],
[
"sym.simplify(_)\n\n",
"_____no_output_____"
]
],
[
[
"#### 무한대<br>Infinity\n\n",
"_____no_output_____"
]
],
[
[
"np.inf, np.inf > 999999\n\n",
"_____no_output_____"
],
[
"sym.oo, sym.oo > 999999\n\n",
"_____no_output_____"
]
],
[
[
"### 제곱근<br>Square root\n\n",
"_____no_output_____"
],
[
"10의 제곱근을 구해보자.<br>Let't find the square root of ten.\n\n",
"_____no_output_____"
]
],
[
[
"np.sqrt(10)\n\n",
"_____no_output_____"
],
[
"sym.sqrt(10)\n\n",
"_____no_output_____"
]
],
[
[
"결과를 숫자로 살펴보려면 `evalf()` 메소드를 사용한다.<br>\nUse `evalf()` method to check the result in digits.\n\n",
"_____no_output_____"
]
],
[
[
"sym.sqrt(10).evalf()\n\n",
"_____no_output_____"
],
[
"sym.sqrt(10).evalf(30)\n\n",
"_____no_output_____"
]
],
[
[
"10의 제곱근을 제곱해보자.<br>Let't square the square root of ten.\n\n",
"_____no_output_____"
]
],
[
[
"print(f\"np.sqrt(10) ** 2 = {np.sqrt(10) ** 2}\")\n\n",
"_____no_output_____"
],
[
"sym.sqrt(10) ** 2\n\n",
"_____no_output_____"
]
],
[
[
"위 결과의 차이에 대해 어떻게 생각하는가?<br>\nWhat do you think about the differences of the results above?\n\n",
"_____no_output_____"
],
[
"### 분수<br>Fractions\n\n",
"_____no_output_____"
],
[
"15 / 11 을 생각해보자.<br>Let't think about 15/11.\n\n",
"_____no_output_____"
]
],
[
[
"num = 15\nden = 11\n\n",
"_____no_output_____"
],
[
"division = num / den\n\n",
"_____no_output_____"
],
[
"division\n\n",
"_____no_output_____"
],
[
"print(division * den)\n\n",
"_____no_output_____"
],
[
"import fractions\n\n",
"_____no_output_____"
],
[
"fr_division = fractions.Fraction(num, den)\n\n",
"_____no_output_____"
],
[
"fr_division\n\n",
"_____no_output_____"
],
[
"fr_division * den\n\n",
"_____no_output_____"
],
[
"sym_division = sym.Rational(num, den)\n\n",
"_____no_output_____"
],
[
"sym_division\n\n",
"_____no_output_____"
],
[
"sym_division * den\n\n",
"_____no_output_____"
]
],
[
[
"위 결과의 차이에 대해 어떻게 생각하는가?<br>\nWhat do you think about the differences of the results above?\n\n",
"_____no_output_____"
],
[
"### 변수를 포함하는 수식<br>Expressions with variables\n\n",
"_____no_output_____"
],
[
"사용할 변수를 정의.<br>Define variables to use.\n\n",
"_____no_output_____"
]
],
[
[
"a, b, c, x = sym.symbols('a b c x')\ntheta, phi = sym.symbols('theta phi')\n\n",
"_____no_output_____"
]
],
[
[
"변수들을 한번 살펴보자.<br>Let's take a look at the variables\n\n",
"_____no_output_____"
]
],
[
[
"a, b, c, x\n\n",
"_____no_output_____"
],
[
"theta, phi\n\n",
"_____no_output_____"
]
],
[
[
"변수를 조합하여 새로운 수식을 만들어 보자.<br>\nLet's make equations using variables.\n\n",
"_____no_output_____"
]
],
[
[
"y = a * x + b\n\n",
"_____no_output_____"
],
[
"y\n\n",
"_____no_output_____"
],
[
"z = a * x * x + b * x + c\n\n",
"_____no_output_____"
],
[
"z\n\n",
"_____no_output_____"
],
[
"w = a * sym.sin(theta) ** 2 + b\n\n",
"_____no_output_____"
],
[
"w\n\n",
"_____no_output_____"
],
[
"p = (x - a) * (x - b) * (x - c)\n\n",
"_____no_output_____"
],
[
"p\n\n",
"_____no_output_____"
],
[
"sym.expand(p, x)\n\n",
"_____no_output_____"
],
[
"sym.collect(_, x)\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\frac{a + ab}{a}\n$$",
"_____no_output_____"
]
],
[
[
"sym.simplify((a + a * b) / a)\n\n",
"_____no_output_____"
]
],
[
[
"### `sympy` 범위 기호 생성<br>Creating `sympy` symbols with range\n\n",
"_____no_output_____"
]
],
[
[
"sym.symbols('i:n')\n\n",
"_____no_output_____"
],
[
"sym.symbols('z1:3')\n\n",
"_____no_output_____"
],
[
"sym.symbols('w(:c)')\n\n",
"_____no_output_____"
],
[
"sym.symbols('a(:2)(:3)')\n\n",
"_____no_output_____"
]
],
[
[
"### 그래프<br>Plot\n\n",
"_____no_output_____"
]
],
[
[
"import sympy.plotting as splot\n\n",
"_____no_output_____"
],
[
"splot.plot(sym.sin(x));\n\n",
"_____no_output_____"
],
[
"import mpmath\nsplot.plot(sym.sin(mpmath.radians(x)), (x, -360, 360));\n\n",
"_____no_output_____"
],
[
"splot.plot_parametric((sym.cos(theta), sym.sin(theta)), (theta, -sym.pi, sym.pi));\n\n",
"_____no_output_____"
],
[
"splot.plot_parametric(\n 16 * (sym.sin(theta)**3),\n 13 * sym.cos(theta) - 5 * sym.cos(2*theta) - 2 * sym.cos(3*theta) - sym.cos(4*theta),\n (theta, -sym.pi, sym.pi)\n);\n\n",
"_____no_output_____"
]
],
[
[
"#### 3차원 그래프<br>3D Plot\n\n",
"_____no_output_____"
]
],
[
[
"x, y = sym.symbols('x y')\nsplot.plot3d(sym.cos(x) + sym.sin(y), (x, -5, 5), (y, -5, 5));\n\n",
"_____no_output_____"
],
[
"splot.plot3d_parametric_line(x, 25-x**2, 25-x**2, (x, -5, 5));\n\n",
"_____no_output_____"
],
[
"u, v = sym.symbols('u v')\nsplot.plot3d_parametric_surface(u + v, sym.sin(u), sym.cos(u), (u, -1, 1), (v, -1, 1));\n\n",
"_____no_output_____"
]
],
[
[
"### 극한<br>Limits\n\n",
"_____no_output_____"
],
[
"$$\n\\lim_{x \\to 0} \\frac{sin x}{x}\n$$",
"_____no_output_____"
]
],
[
[
"sym.limit(sym.sin(x) / x, x, 0)\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\lim_{x \\to \\infty} x\n$$",
"_____no_output_____"
]
],
[
[
"sym.limit(x, x, sym.oo)\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\lim_{x \\to \\infty} \\frac{1}{x}\n$$",
"_____no_output_____"
]
],
[
[
"sym.limit(1 / x, x, sym.oo)\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\lim_{x \\to 0} x^x\n$$",
"_____no_output_____"
]
],
[
[
"sym.limit(x ** x, x, 0)\n\n",
"_____no_output_____"
]
],
[
[
"### 미적분<br>Calculus\n\n",
"_____no_output_____"
]
],
[
[
"z\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\frac{dz}{dx} =\\frac{d}{dx} \\left( a x^2 + bx + c \\right)\n$$",
"_____no_output_____"
]
],
[
[
"z.diff(x)\n\n",
"_____no_output_____"
]
],
[
[
"$$\n\\int{z}{dx} =\\int{\\left(a x^2 + bx + c \\right)}{dx}\n$$",
"_____no_output_____"
]
],
[
[
"sym.integrate(z, x)\n\n",
"_____no_output_____"
],
[
"w\n\n",
"_____no_output_____"
],
[
"w.diff(theta)\n\n",
"_____no_output_____"
],
[
"sym.integrate(w, theta)\n\n",
"_____no_output_____"
]
],
[
[
"#### 정적분<br>Definite integral\n\n",
"_____no_output_____"
]
],
[
[
"sym.integrate(w, (theta, 0, sym.pi))\n\n",
"_____no_output_____"
]
],
[
[
"### 근<br>Root\n\n",
"_____no_output_____"
]
],
[
[
"z\n\n",
"_____no_output_____"
],
[
"z_sol_list = sym.solve(z, x)\n\n",
"_____no_output_____"
],
[
"z_sol_list\n\n",
"_____no_output_____"
],
[
"sym.solve(2* sym.sin(theta) ** 2 - 1, theta)\n\n",
"_____no_output_____"
]
],
[
[
"### 코드 생성<br>Code generation\n\n",
"_____no_output_____"
]
],
[
[
"print(sym.python(z_sol_list[0]))\n\n",
"_____no_output_____"
],
[
"import sympy.utilities.codegen as sc\n\n",
"_____no_output_____"
],
[
"[(c_name, c_code), (h_name, c_header)] = sc.codegen(\n (\"z_sol\", z_sol_list[0]), \n \"C89\", \n \"test\"\n)\n\n",
"_____no_output_____"
],
[
"c_name\n\n",
"_____no_output_____"
],
[
"print(c_code)\n\n",
"_____no_output_____"
],
[
"h_name\n\n",
"_____no_output_____"
],
[
"print(c_header)\n\n",
"_____no_output_____"
]
],
[
[
"### 방정식<br>Equation solving\n\n",
"_____no_output_____"
],
[
"$$\nx^4=1\n$$",
"_____no_output_____"
]
],
[
[
"sym.solve(x ** 4 - 1, x)\n\n",
"_____no_output_____"
],
[
"sym.solveset(x ** 4 - 1, x)\n\n",
"_____no_output_____"
]
],
[
[
"$$\ne^x=-1\n$$",
"_____no_output_____"
]
],
[
[
"sym.solve(sym.exp(x) + 1, x)\n\n",
"_____no_output_____"
]
],
[
[
"$$\nx^4 - 3x^2 +1\n$$",
"_____no_output_____"
]
],
[
[
"f = x ** 4 - 3 * x ** 2 + 1\nsym.factor(f)\n\n",
"_____no_output_____"
],
[
"sym.factor(f, modulus=5)\n\n",
"_____no_output_____"
]
],
[
[
"Boolean equations\n\n",
"_____no_output_____"
]
],
[
[
"sym.satisfiable(a & b)\n\n",
"_____no_output_____"
],
[
"sym.satisfiable(a ^ b)\n\n",
"_____no_output_____"
]
],
[
[
"### 연립방정식<br>System of equations\n\n",
"_____no_output_____"
]
],
[
[
"a1, a2, a3 = sym.symbols('a1:4')\nb1, b2, b3 = sym.symbols('b1:4')\nc1, c2 = sym.symbols('c1:3')\nx1, x2 = sym.symbols('x1:3')\n\n",
"_____no_output_____"
],
[
"eq1 = sym.Eq(\n a1 * x1 + a2 * x2, \n c1,\n)\n\n",
"_____no_output_____"
],
[
"eq1\n\n",
"_____no_output_____"
],
[
"eq2 = sym.Eq(\n b1 * x1 + b2 * x2,\n c2,\n)\n\n",
"_____no_output_____"
],
[
"eq2\n\n",
"_____no_output_____"
],
[
"eq_list = [eq1, eq2]\n\n",
"_____no_output_____"
],
[
"eq_list\n\n",
"_____no_output_____"
],
[
"sym.solve(eq_list, (x1, x2))\n\n",
"_____no_output_____"
]
],
[
[
"### 행렬<br>Matrix\n\n",
"_____no_output_____"
]
],
[
[
"identity = sym.Matrix([[1, 0], [0, 1]])\nidentity\n\n",
"_____no_output_____"
],
[
"A = sym.Matrix([[1, a], [b, 1]])\nA\n\n",
"_____no_output_____"
],
[
"A * identity\n\n",
"_____no_output_____"
],
[
"A * A\n\n",
"_____no_output_____"
],
[
"A ** 2\n\n",
"_____no_output_____"
]
],
[
[
"### 미분방정식<br>Differential Equations\n\n",
"_____no_output_____"
],
[
"$$\n\\frac{d^2}{dx^2}f(x) + f(x)\n$$",
"_____no_output_____"
]
],
[
[
"f = sym.Function('f', real=True)\n\n",
"_____no_output_____"
],
[
"(f(x).diff(x, x) + f(x))\n\n",
"_____no_output_____"
],
[
"sym.dsolve(f(x).diff(x, x) + f(x))\n\n",
"_____no_output_____"
]
],
[
[
"기계진동<br>Mechanical Vibration\n\n",
"_____no_output_____"
],
[
"$$\nm \\frac{d^2x(t)}{dt^2} +c \\frac{dx(t)}{dt} + k x(t) = 0\n$$",
"_____no_output_____"
]
],
[
[
"m, c, k, t = sym.symbols('m c k t')\nx = sym.Function('x', real=True)\nvib_eq = m * x(t).diff(t, t) + c * x(t).diff(t) + k * x(t)\nvib_eq\n\n",
"_____no_output_____"
],
[
"result = sym.dsolve(vib_eq)\nresult\n\n",
"_____no_output_____"
],
[
"sym.simplify(result)\n\n",
"_____no_output_____"
]
],
[
[
"강제진동<br>Forced Vibration\n\n",
"_____no_output_____"
],
[
"$$\nm \\frac{d^2x(t)}{dt^2} +c \\frac{dx(t)}{dt} + x(t) = sin(t)\n$$",
"_____no_output_____"
]
],
[
[
"forced_vib_eq = m * x(t).diff(t, t) + c * x(t).diff(t) + k * x(t) - sym.sin(t)\nforced_vib_eq\n\n",
"_____no_output_____"
],
[
"result = sym.dsolve(forced_vib_eq)\nresult\n\n",
"_____no_output_____"
],
[
"sym.simplify(result)\n\n",
"_____no_output_____"
]
],
[
[
"## 참고문헌<br>References\n\n",
"_____no_output_____"
],
[
"* SymPy Development Team, SymPy 1.4 documentation, sympy.org, 2019 04 10. [Online] Available : https://docs/sympy.org/latest/index.html.\n* SymPy Development Team, SymPy Tutorial, SymPy 1.4 documentation, sympy.org, 2019 04 10. [Online] Available : https://docs/sympy.org/latest/tutorial/index.html.\n* d84_n1nj4, \"How to keep fractions in your equation output\", Stackoverflow.com, 2017 08 12. [Online] Available : https://stackoverflow.com/a/45651175.\n* Python developers, \"Fractions\", Python documentation, 2019 10 12. [Online] Available : https://docs.python.org/3.7/library/fractions.html.\n* SymPy Development Team, codegen, SymPy 1.4 documentation, sympy.org, 2019 04 10. [Online] Available : https://docs/sympy.org/latest/modules/utilities/codegen.html.\n* Pedregosa, F., Sympy : Symbolic Mathematics in Python, Scipy Lecture Notes, 2019 March,[Online] Available : http://www.scipy-lectures.org/packages/sympy.html [Accessed 2019 10 28]\n* MIT, Twitter, 2021 Feb. [Online] Available : https://twitter.com/MIT/status/1360971008325406721.\n\n",
"_____no_output_____"
],
[
"## Final Bell<br>마지막 종\n\n",
"_____no_output_____"
]
],
[
[
"# stackoverfow.com/a/24634221\nimport os\nos.system(\"printf '\\a'\");\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e74f00406557d0672be3961b9c557c0d8245adf0 | 8,183 | ipynb | Jupyter Notebook | Tools/.ipynb_checkpoints/Intersections CSV to GeoJson-checkpoint.ipynb | redmondWC/Intersections-Walnut-Creek-CA | 5667595d6048b6452c3d94f54841b266e39fb867 | [
"MIT"
] | null | null | null | Tools/.ipynb_checkpoints/Intersections CSV to GeoJson-checkpoint.ipynb | redmondWC/Intersections-Walnut-Creek-CA | 5667595d6048b6452c3d94f54841b266e39fb867 | [
"MIT"
] | null | null | null | Tools/.ipynb_checkpoints/Intersections CSV to GeoJson-checkpoint.ipynb | redmondWC/Intersections-Walnut-Creek-CA | 5667595d6048b6452c3d94f54841b266e39fb867 | [
"MIT"
] | null | null | null | 28.712281 | 145 | 0.515703 | [
[
[
"# Intersection CSV to GeoJSON Converstion Script",
"_____no_output_____"
],
[
"Input list of intersections as CSV file in format: ID, Name1, Name2, Latitude, Longitude\n\nWhere Name 1 could be north/south street name and Name 2 could be east/west street name\n\n### CSV Input Example:\n2,California Blvd,Ygnacio Valley Rd,37.904976, -122.065751\n\n\nOutputs GeoJSON feature files from input data and adds elevation in feet when available.\n\n### GeoJSON Output Example:\n{\n \"geometry\": {\n \"coordinates\": [\n -122.065751,\n 37.904976,\n 156.12\n ],\n \"type\": \"Point\"\n },\n \"id\": \"02\",\n \"properties\": {\n \"name\": \"California Blvd & Ygnacio Valley Rd\"\n },",
"_____no_output_____"
]
],
[
[
"#imports\nfrom geojson import Point, Feature, FeatureCollection, dump\nimport csv\n\nimport requests\nimport urllib\n\nimport os, sys",
"_____no_output_____"
],
[
"#Variables\n\nfeatures = []\ninput_filename = \"../WalnutCreekIntersections.csv\"\noutput_filename = \"../WalnutCreekIntersections-April_2022.geojson\"\nurl = r'https://nationalmap.gov/epqs/pqs.php?'\n",
"_____no_output_____"
],
[
"def elevation_of_location(lat, lon):\n \"\"\"Query service using lat, lon. Add the elevation values as a new column.\"\"\"\n # define rest query params\n params = {\n 'output': 'json',\n 'x': lon,\n 'y': lat,\n 'units': 'Feet' #'Meters' is also available\n }\n # format query string and return query value\n result = requests.get((url + urllib.parse.urlencode(params)))\n if result is not None:\n return result.json()['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation']\n else: \n return None\n ",
"_____no_output_____"
],
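[
"# Quick check of the elevation helper on a single point\n# (coordinates taken from the CSV example above; requires network access)\nelevation_of_location(37.904976, -122.065751)",
"_____no_output_____"
],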
[
"def read_file():\n \"read in csv file and output geojson file\"\n with open(input_filename, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n line_count = 0\n for row in csv_reader:\n lat = float(row[3])\n lon = float(row[4])\n ele = elevation_of_location(lat,lon)\n if ele is not None:\n features.append(Feature(geometry=Point( ( lon, lat, ele )), properties={\"name\": row[1] +\" & \"+ row[2]}, id=row[0] ))\n \n else:\n features.append(Feature(geometry=Point( ( lon, lat)), properties={\"name\": row[1] +\" & \"+ row[2]}, id=row[0] ))\n \n line_count += 1\n print(f'Processing line #{line_count}')\n \n feature_collection = FeatureCollection(features)\n with open(output_filename, 'w') as f:\n \tdump(feature_collection, f, sort_keys=True, indent=4, separators=(',', ': '))\n print(f\"Processed {line_count} intersections and saved as {output_filename}\")\n \n\n",
"_____no_output_____"
],
[
"read_file()\n\n#python3 -m json.tool output_filename.geojson > output_filenameV2.geojson\n\n\n\n",
"Processing line #1\nProcessing line #2\nProcessing line #3\nProcessing line #4\nProcessing line #5\nProcessing line #6\nProcessing line #7\nProcessing line #8\nProcessing line #9\nProcessing line #10\nProcessing line #11\nProcessing line #12\nProcessing line #13\nProcessing line #14\nProcessing line #15\nProcessing line #16\nProcessing line #17\nProcessing line #18\nProcessing line #19\nProcessing line #20\nProcessing line #21\nProcessing line #22\nProcessing line #23\nProcessing line #24\nProcessing line #25\nProcessing line #26\nProcessing line #27\nProcessing line #28\nProcessing line #29\nProcessing line #30\nProcessing line #31\nProcessing line #32\nProcessing line #33\nProcessing line #34\nProcessing line #35\nProcessing line #36\nProcessing line #37\nProcessing line #38\nProcessing line #39\nProcessing line #40\nProcessing line #41\nProcessing line #42\nProcessing line #43\nProcessing line #44\nProcessing line #45\nProcessing line #46\nProcessing line #47\nProcessing line #48\nProcessing line #49\nProcessing line #50\nProcessing line #51\nProcessing line #52\nProcessing line #53\nProcessing line #54\nProcessing line #55\nProcessing line #56\nProcessing line #57\nProcessing line #58\nProcessing line #59\nProcessing line #60\nProcessing line #61\nProcessing line #62\nProcessing line #63\nProcessing line #64\nProcessing line #65\nProcessing line #66\nProcessing line #67\nProcessing line #68\nProcessing line #69\nProcessing line #70\nProcessing line #71\nProcessing line #72\nProcessing line #73\nProcessing line #74\nProcessing line #75\nProcessing line #76\nProcessing line #77\nProcessing line #78\nProcessing line #79\nProcessing line #80\nProcessing line #81\nProcessing line #82\nProcessing line #83\nProcessing line #84\nProcessing line #85\nProcessing line #86\nProcessing line #87\nProcessing line #88\nProcessing line #89\nProcessing line #90\nProcessing line #91\nProcessing line #92\nProcessing line #93\nProcessing line #94\nProcessing line #95\nProcessing line #96\nProcessing line #97\nProcessing line #98\nProcessing line #99\nProcessing line #100\nProcessed 100 intersections and saved as ../WalnutCreekIntersections-Mar2022.geojson\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74f0160061aa534465aca39cf6da14ed9919109 | 119,034 | ipynb | Jupyter Notebook | tutorials/06-MNIST_dataset.ipynb | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 63 | 2020-04-20T16:31:16.000Z | 2022-03-29T01:05:35.000Z | tutorials/06-MNIST_dataset.ipynb | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 5 | 2020-04-21T11:31:39.000Z | 2022-03-24T13:42:56.000Z | tutorials/06-MNIST_dataset.ipynb | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 8 | 2020-04-21T09:16:42.000Z | 2022-02-23T16:28:43.000Z | 286.828916 | 41,856 | 0.921224 | [
[
[
"# Evasion and Poisoning Attacks on MNIST dataset\n\n\nIn this tutorial we show how to load the **MNIST handwritten digits dataset** \n and use it to train a Support Vector Machine (SVM).\n\nLater we are going to perform Evasion and Poisoning attacks against the trained\n classifier, as previosuly described in [evasion](03-Evasion.ipynb) and \n [poisoning](05-Poisoning.ipynb) tutorials.\n\n[](\nhttps://colab.research.google.com/github/pralab/secml/blob/HEAD/tutorials/06-MNIST_dataset.ipynb)",
"_____no_output_____"
]
],
[
[
"%%capture --no-stderr --no-display\n# NBVAL_IGNORE_OUTPUT\n\ntry:\n import secml\nexcept ImportError:\n %pip install git+https://gitlab.com/secml/secml",
"_____no_output_____"
]
],
[
[
"## Training of the classifier\n\nFirst, we load the dataset and train the classifier. For this tutorial, \n we only consider 2 digits, the 5 (five) and the 9 (nine).",
"_____no_output_____"
]
],
[
[
"# NBVAL_IGNORE_OUTPUT\nfrom secml.data.loader import CDataLoaderMNIST\n\n# MNIST dataset will be downloaded and cached if needed\nloader = CDataLoaderMNIST()",
"_____no_output_____"
],
[
"random_state = 999\n\nn_tr = 100 # Number of training set samples\nn_val = 500 # Number of validation set samples\nn_ts = 500 # Number of test set samples\n\ndigits = (5, 9)\n\ntr_val = loader.load('training', digits=digits, num_samples=n_tr + n_val)\nts = loader.load('testing', digits=digits, num_samples=n_ts)\n\n# Split in training and validation set\ntr = tr_val[:n_tr, :]\nval = tr_val[n_tr:, :]\n\n# Normalize the features in `[0, 1]`\ntr.X /= 255\nval.X /= 255\nts.X /= 255\n\nfrom secml.ml.classifiers import CClassifierSVM\n# train SVM in the dual space, on a linear kernel, as needed for poisoning\nclf = CClassifierSVM(C=10, kernel='linear')\n\nprint(\"Training of classifier...\")\nclf.fit(tr.X, tr.Y)\n\n# Compute predictions on a test set\ny_pred = clf.predict(ts.X)\n\n# Metric to use for performance evaluation\nfrom secml.ml.peval.metrics import CMetricAccuracy\nmetric = CMetricAccuracy()\n\n# Evaluate the accuracy of the classifier\nacc = metric.performance_score(y_true=ts.Y, y_pred=y_pred)\n\nprint(\"Accuracy on test set: {:.2%}\".format(acc))",
"Training of classifier...\nAccuracy on test set: 93.60%\n"
]
],
[
[
"## Evasion attack with MNIST dataset\n\nLet's define the attack parameters. Firstly, we chose to generate an *l2*\n perturbation within a maximum ball of radius `eps = 2.5` from the initial \n points. Secondly, we also add a low/upper bound as our feature space \n is limited in `[0, 1]`. Lastly, as we are not interested in generating \n adversarial examples for a specific class, we perform an error-generic attack\n by setting `y_target = None`.\n\n*Please note that the attack using the MNIST dataset may take a while \n (up to a few minutes) depending on the machine the script is run on.*",
"_____no_output_____"
]
],
[
[
"# For simplicity, let's attack a subset of the test set\nattack_ds = ts[:25, :]\n\nnoise_type = 'l2' # Type of perturbation 'l1' or 'l2'\ndmax = 2.5 # Maximum perturbation\nlb, ub = 0., 1. # Bounds of the attack space. Can be set to `None` for unbounded\ny_target = None # None if `error-generic` or a class label for `error-specific`\n\n# Should be chosen depending on the optimization problem\nsolver_params = {\n 'eta': 0.5, \n 'eta_min': 2.0, \n 'eta_max': None,\n 'max_iter': 100, \n 'eps': 1e-6\n}\n\nfrom secml.adv.attacks import CAttackEvasionPGDLS\npgd_ls_attack = CAttackEvasionPGDLS(classifier=clf,\n double_init_ds=tr,\n distance=noise_type, \n dmax=dmax,\n solver_params=solver_params,\n y_target=y_target)\n\nprint(\"Attack started...\")\neva_y_pred, _, eva_adv_ds, _ = pgd_ls_attack.run(attack_ds.X, attack_ds.Y)\nprint(\"Attack complete!\")\n\nacc = metric.performance_score(\n y_true=attack_ds.Y, y_pred=clf.predict(attack_ds.X))\nacc_attack = metric.performance_score(\n y_true=attack_ds.Y, y_pred=eva_y_pred)\n\nprint(\"Accuracy on reduced test set before attack: {:.2%}\".format(acc))\nprint(\"Accuracy on reduced test set after attack: {:.2%}\".format(acc_attack))",
"Attack started...\nAttack complete!\nAccuracy on reduced test set before attack: 100.00%\nAccuracy on reduced test set after attack: 12.00%\n"
]
],
[
[
"We can observe how the classifier trained on the MNIST dataset has been \n *successfully evaded* by the adversarial examples generated by our attack.\n \nLet's now visualize few of the adversarial examples. The first row are the \n original samples and the second row are the adversarial examples. Above each\n digit it is shown the true label and the predicted label in parenthesis. ",
"_____no_output_____"
]
],
[
[
"from secml.figure import CFigure\n# Only required for visualization in notebooks\n%matplotlib inline\n\n# Let's define a convenience function to easily plot the MNIST dataset\ndef show_digits(samples, preds, labels, digs, n_display=8):\n samples = samples.atleast_2d()\n n_display = min(n_display, samples.shape[0])\n fig = CFigure(width=n_display*2, height=3)\n for idx in range(n_display):\n fig.subplot(2, n_display, idx+1)\n fig.sp.xticks([])\n fig.sp.yticks([])\n fig.sp.imshow(samples[idx, :].reshape((28, 28)), cmap='gray')\n fig.sp.title(\"{} ({})\".format(digits[labels[idx].item()], digs[preds[idx].item()]),\n color=(\"green\" if labels[idx].item()==preds[idx].item() else \"red\"))\n fig.show()\n\nshow_digits(attack_ds.X, clf.predict(attack_ds.X), attack_ds.Y, digits)\nshow_digits(eva_adv_ds.X, clf.predict(eva_adv_ds.X), eva_adv_ds.Y, digits)",
"_____no_output_____"
]
],
[
[
"## Poisoning attack with MNIST dataset\n\nFor poisoning attacks the parameters are much simpler. We set the the bounds \n of the attack space and the number of adversarial points to generate, \n 50 in this example. Lastly, we chose the solver parameters for this \n specific optimization problem.\n\n*Please note that the attack using the MNIST dataset may take a while \n (up to a few minutes) depending on the machine the script is run on.*\n ",
"_____no_output_____"
]
],
[
[
"lb, ub = 0., 1. # Bounds of the attack space. Can be set to `None` for unbounded\nn_poisoning_points = 15 # Number of poisoning points to generate\n\n# Should be chosen depending on the optimization problem\nsolver_params = {\n 'eta': 0.25,\n 'eta_min': 2.0,\n 'eta_max': None,\n 'max_iter': 100,\n 'eps': 1e-6\n}\n\nfrom secml.adv.attacks import CAttackPoisoningSVM\npois_attack = CAttackPoisoningSVM(classifier=clf,\n training_data=tr,\n val=val,\n lb=lb, ub=ub,\n solver_params=solver_params,\n random_seed=random_state)\npois_attack.n_points = n_poisoning_points\n\n# Run the poisoning attack\nprint(\"Attack started...\")\npois_y_pred, _, pois_points_ds, _ = pois_attack.run(ts.X, ts.Y)\nprint(\"Attack complete!\")\n\n# Evaluate the accuracy of the original classifier\nacc = metric.performance_score(y_true=ts.Y, y_pred=clf.predict(ts.X))\n# Evaluate the accuracy after the poisoning attack\npois_acc = metric.performance_score(y_true=ts.Y, y_pred=pois_y_pred)\n\nprint(\"Original accuracy on test set: {:.2%}\".format(acc))\nprint(\"Accuracy after attack on test set: {:.2%}\".format(pois_acc))\n\n# Training of the poisoned classifier for visualization purposes\npois_clf = clf.deepcopy()\npois_tr = tr.append(pois_points_ds) # Join the training set with the poisoning points\npois_clf.fit(pois_tr.X, pois_tr.Y)\n\nshow_digits(pois_points_ds.X, pois_clf.predict(pois_points_ds.X), \n pois_points_ds.Y, digits)",
"Attack started...\nAttack complete!\nOriginal accuracy on test set: 93.60%\nAccuracy after attack on test set: 50.40%\n"
]
],
[
[
"We can see that the classifier trained on the MNIST dataset has been \n successfully poisoned. To increase the attack power, more poisoning points\n can be crafted, at the expense of a much slower optimization process.\n \nLet's note that the label of each adversarial example we show has been \n *flipped* by the attack with respect to the actual true label. Thus, the \n predicted label (parenthesis) by the poisoned classifier is displayed \n in green when *different* from the true label of the digit.",
"_____no_output_____"
]
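,
[
"A stronger (but much slower to optimize) attack can be requested simply by asking for more poisoning points before calling `run` again; the value below is purely illustrative.",
"_____no_output_____"
],
[
"# illustrative only: more poisoning points -> stronger attack, slower optimization\npois_attack.n_points = 50 # instead of the 15 used above; re-run pois_attack.run(ts.X, ts.Y) to regenerate",
"_____no_output_____"
]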
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74f08b7d04d688e63ed4ed2b88ee88d98c6a928 | 60,407 | ipynb | Jupyter Notebook | tts/tts-1-1-entropy.ipynb | laic/uoe_speech_processing_course | 7cbc0424e87a8a98fd92fb664c9c156c83323f78 | [
"MIT"
] | 19 | 2020-09-20T17:01:53.000Z | 2021-12-15T18:24:06.000Z | tts/tts-1-1-entropy.ipynb | laic/uoe_speech_processing_course | 7cbc0424e87a8a98fd92fb664c9c156c83323f78 | [
"MIT"
] | null | null | null | tts/tts-1-1-entropy.ipynb | laic/uoe_speech_processing_course | 7cbc0424e87a8a98fd92fb664c9c156c83323f78 | [
"MIT"
] | 10 | 2020-09-25T08:09:50.000Z | 2021-09-14T03:28:01.000Z | 146.263923 | 9,868 | 0.886172 | [
[
[
"#### _Speech Processing: TTS_",
"_____no_output_____"
]
],
[
[
"# run this first\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport IPython",
"_____no_output_____"
]
],
[
[
"# 1 Entropy\n\n\n### Learning Outcomes\n* Understand that entropy measures uncertainty\n* Gain some intutitions about how entropy behaves\n* See that entropy can be reduced by splitting a data set into two partitions\n\n### Need to know\n* Topic Videos: Decision tree, Learning decision trees\n\nOur goal in this sequence of notebooks is to understand how a classification tree is learned from data. \n\nEach split of the data in a decision tree decreases uncertainty about the value of the predictee: we become more and more certain of its value as we descend the tree. We can measure the amount of uncertainty using entropy.\n",
"_____no_output_____"
],
[
"## 1.1 How entropy is calculated",
"_____no_output_____"
],
[
"Consider a categorical variable with M possible values, or *classes*.\n\nEntropy is defined as\n\n$$ \\Large H = - \\sum_{i=1}^{M} p_i log_2(p_i) $$\n\nwhere the $p_i$ are the probabilities of each of the $M$ classes. $H$ is the entropy in **bits**. It is a measure of uncertainty. Higher entropy means \"more unpredictable / higher uncertainty\". Lower entropy means \"more predictable / more certainty\". \n\nTo help you inderstand the equation, here's a short video",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\nIPython.display.IFrame(width=\"640\",height=\"428\",src=\"https://fast.wistia.net/embed/iframe/utpd6km04m\")",
"_____no_output_____"
]
],
[
[
"and here's a Python function to compute entropy from an array of counts, or probabilities. (It works for either case.)",
"_____no_output_____"
]
],
[
[
"def entropy(counts):\n \"\"\" accepts an array of counts or probabilities and computes -1 * sum {p * log p}\"\"\"\n H=0 # entropy\n total_count=float(sum(counts))\n for c in counts:\n if c > 0: # cannot take log of zero\n p=float(c)/total_count\n H=H + p * math.log2(p)\n H=H*-1.0\n return H # in bits, because log was base 2",
"_____no_output_____"
]
],
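[
[
"As a quick sanity check of the function (the counts below are illustrative): a fair coin, with two equally likely outcomes, has exactly 1 bit of entropy, while a completely certain outcome has 0 bits.",
"_____no_output_____"
],
[
"# quick sanity check with illustrative counts\nprint(entropy([1, 1])) # fair coin: 1 bit\nprint(entropy([100, 0])) # completely certain: 0 bits",
"_____no_output_____"
]
],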
[
[
"## 1.2 Get an intuitive understanding of entropy",
"_____no_output_____"
],
[
"To help you visualise probability distributions, here's a function for plotting one. It also computes the entropy of the distribution.",
"_____no_output_____"
]
],
[
[
"def plot_distribution(labels,counts,title='Distribution'):\n if sum(counts) == 0:\n print(\"Cannot handle this case!\")\n return 0\n total_count=float(sum(counts))\n pdf = [c / total_count for c in counts]\n x_pos = [i for i, _ in enumerate(labels)]\n plt.bar(x_pos, pdf, color='blue')\n plt.title(title+\" (entropy={:.3} bits)\".format(entropy(counts)))\n plt.xlabel(\"label\")\n plt.ylabel(\"probability\")\n plt.xticks(x_pos, labels)\n plt.show()",
"_____no_output_____"
]
],
[
[
"### 1.2.1 What entropy measures about a probability distribution\n\nNow find out by experimentation what the **highest and lowest values of entropy** are. The variable (which will be called the predictee when we build a Decision Tree) here is \"Fruit\" and it has two possible values (= classes) of \"Apple\" and \"Orange\". You are going to directly manipulate the count of each class in the code and see what the effect on the entropy is.",
"_____no_output_____"
]
],
[
[
"# the labels of the two classes (i.e, the values the categorical random variable \"Fruit\" can take)\nlabels = ['Apple', 'Orange']\n\n# the number of examples of each class in our data set\ncounts = [4, 10] # <- play with the distribution of counts\n\nplot_distribution(labels,counts,\"Fruit\")\n",
"_____no_output_____"
]
],
[
[
"### 1.2.2 Try different numbers of classes\nWhat is the relationship between the number of classes and the **highest value of entropy** you can acheive?\n(Hint: try with 2, 4, and 8 classes, as well as other numbers.)",
"_____no_output_____"
]
],
[
[
"# add and remove classes to change how many there are\nlabels = ['k', 's', 'ʃ', 'tʃ']\n\n# the number of counts must match the number of classes\ncounts = [11180, 2185, 1170, 2005] # <- play with the distribution of counts\n\n# for example, how about a distribution over 5 classes\nlabels = ['a', 'b', 'c', 'd', 'e']\ncounts = [12, 45, 101, 22, 99] # <- play with the distribution of counts\n\n# or over 8 classes (or any other number - please experiment!)\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\ncounts = [12, 45, 101, 22, 99, 17, 9, 4 ] # <- play with the distribution of counts\n\nplot_distribution(labels,counts,\"My distribution\")",
"_____no_output_____"
]
],
[
[
"Now go back to the equation and relate what you have found by experimentation to the terms in the equation. Where in the equation is the number of classes that you just varied? Where in the equation is the probability distribution over those classes?",
"_____no_output_____"
],
[
"## 1.3 Reduce entropy\n\nFrom your experiments above you should have learned that a uniform distribution has maximum entropy, and anything you do to make it more uneven will reduce entropy. The limit is reached when only one class has a non-zero count: then the entropy is zero.\n\nOur decision tree will be trying to reduce entropy. It seems that the way to do that is to make the probability distribution less uniform (more 'uneven').\n\nSo, how can we make a probability distribution less uniform? Your next task is to take a distribution and split it into two distributions that have lower entropy. Try different ways to split the ditribution. How much can you reduce the entropy? Is it ever possible to *increase* the entropy?\n\nHere's the original distribution, over 4 classes:",
"_____no_output_____"
]
],
[
[
"labels = ['k', 's', 'ʃ', 'tʃ'] # do not change this\ncounts = np.array([11180, 2185, 1170, 2005]) # do not change this\n\nprint(\"The distribution before the split was\",counts)\nprint(\"and the entropy of that distribution is {:.3} bits\".format(entropy(counts)))\nplot_distribution(labels,counts,\"Original distribution\")",
"The distribution before the split was [11180 2185 1170 2005]\nand the entropy of that distribution is 1.41 bits\n"
]
],
[
[
"Now we split the above counts into two partitions. We'll call then 'left' and 'right' because we're eventually going to build a decision tree (not yet though!).",
"_____no_output_____"
]
],
[
[
"# play around with these values (they can't be larger than the original counts above though)\nleft_counts = np.array([46, 1339, 12, 104])\n\nright_counts = np.subtract(counts,left_counts) # this is the remaining data; do not change this line\n\nprint(\"The two distributions after the split are\",left_counts,\"and\",right_counts)\nprint(\"Entropies of the two distributions are {:.3} bits and {:.3} bits.\".format(entropy(left_counts),entropy(right_counts)))\n\n# the total entropy after splitting is simply a weighted sum of the two entropies\ntotal_entropy = ( sum(left_counts)*entropy(left_counts) + sum(right_counts)*entropy(right_counts) ) / (sum(left_counts) + sum(right_counts))\nprint(\"Total entropy of the two distributions is {:.3} bits \".format(total_entropy))\nprint(\"which is a reduction of {:.3} bits compared to the original distribution.\".format(entropy(counts)-total_entropy))\n\nplot_distribution(labels,left_counts,\"Left partition\")\nplot_distribution(labels,right_counts,\"Right partition\")",
"The two distributions after the split are [ 46 1339 12 104] and [11134 846 1158 1901]\nEntropies of the two distributions are 0.624 bits and 1.22 bits.\nTotal entropy of the two distributions is 1.16 bits \nwhich is a reduction of 0.244 bits compared to the original distribution.\n"
]
],
[
[
"## Summary\n\nYou should now understand how entropy behaves. As we make a probability distribution more and more predictable, entropy reduces. This will be the goal of our decision tree: to reduce uncertainty about the value of the predictee.\n\nIn this notebook, when you split a distribution into two partitions, you should have been able to acheive large reductions in entropy, like in the \"ideal Decision Tree\" in the topic video [Learning Decision Trees](https://speech.zone/courses/speech-processing/module-5-speech-synthesis/).\n\nWhen we build our decision tree, we can only make splits based on questions about the *predictors*. This is because, at inference time, that is all we will know.\n\nWe'll do that in the next notebook.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74f0f755c1800f445045d0676d69dce36af106f | 306,157 | ipynb | Jupyter Notebook | 07_Visualization/Chipotle/Exercises.ipynb | geneh0/pandas_exercises | e94dc86ba52726abb5c29d4553a136c51278e2fa | [
"BSD-3-Clause"
] | null | null | null | 07_Visualization/Chipotle/Exercises.ipynb | geneh0/pandas_exercises | e94dc86ba52726abb5c29d4553a136c51278e2fa | [
"BSD-3-Clause"
] | null | null | null | 07_Visualization/Chipotle/Exercises.ipynb | geneh0/pandas_exercises | e94dc86ba52726abb5c29d4553a136c51278e2fa | [
"BSD-3-Clause"
] | null | null | null | 1,360.697778 | 237,427 | 0.64871 | [
[
[
"# Visualizing Chipotle's Data",
"_____no_output_____"
],
[
"This time we are going to pull data directly from the internet.\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n\n# set this so the graphs open internally\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). ",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called chipo.",
"_____no_output_____"
]
],
[
[
"chipo = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', sep = '\\t')",
"_____no_output_____"
]
],
[
[
"### Step 4. See the first 10 entries",
"_____no_output_____"
]
],
[
[
"chipo.head(10)",
"_____no_output_____"
]
],
[
[
"### Step 5. Create a histogram of the top 5 items bought",
"_____no_output_____"
]
],
[
[
"chipo.item_name.value_counts()[0:5].plot(kind = 'bar');",
"_____no_output_____"
]
],
[
[
"### Step 6. Create a scatterplot with the number of items orderered per order price\n#### Hint: Price should be in the X-axis and Items ordered in the Y-axis",
"_____no_output_____"
]
],
[
[
"chipo.item_price = chipo.item_price.apply(lambda x: float(x[1:-1]))",
"_____no_output_____"
],
[
"chipo.groupby('order_id').sum().plot(kind = 'scatter', x = 'item_price', y = 'quantity')\n\nplt.title('Order cost by number of items in order')\nplt.xlabel('Order Total')\nplt.ylabel('Number of Items')",
"_____no_output_____"
]
],
[
[
"### Step 7. BONUS: Create a question and a graph to answer your own question.",
"_____no_output_____"
]
]
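,
[
[
"One possible bonus question (illustrative): which 5 items generate the most revenue?",
"_____no_output_____"
],
[
"# illustrative bonus answer: top 5 items by revenue\nrevenue = (chipo.quantity * chipo.item_price).groupby(chipo.item_name).sum()\nrevenue.sort_values(ascending = False)[0:5].plot(kind = 'bar')\nplt.title('Top 5 items by revenue')\nplt.ylabel('Revenue ($)');",
"_____no_output_____"
]
]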
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e74f11147d798ec7555271d63aa181c1465e97b7 | 743,038 | ipynb | Jupyter Notebook | answers/Worksheet 5.1 - Feature Engineering - Answers.ipynb | d3vzer0/applied_data_science_amsterdam | 94b97c557001927b1ddffdfabc09c19364e1370b | [
"MIT"
] | 2 | 2020-02-20T12:40:25.000Z | 2021-07-13T06:27:15.000Z | answers/Worksheet 5.1 - Feature Engineering - Answers.ipynb | d3vzer0/applied_data_science_amsterdam | 94b97c557001927b1ddffdfabc09c19364e1370b | [
"MIT"
] | null | null | null | answers/Worksheet 5.1 - Feature Engineering - Answers.ipynb | d3vzer0/applied_data_science_amsterdam | 94b97c557001927b1ddffdfabc09c19364e1370b | [
"MIT"
] | null | null | null | 267.279856 | 542,908 | 0.890814 | [
[
[
"# Load Libraries - Make sure to run this cell!\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime \nfrom collections import Counter\nfrom sklearn import feature_extraction, tree, model_selection, metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nimport seaborn as sns\nfrom yellowbrick.features.rankd import Rank2D\nfrom yellowbrick.features.radviz import RadViz\nfrom yellowbrick.features.pcoords import ParallelCoordinates\nfrom yellowbrick.features import JointPlotVisualizer\nfrom yellowbrick.classifier import ConfusionMatrix\nfrom yellowbrick.classifier import ClassificationReport\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport entropy\n%matplotlib inline\n\n# Useful cyber libraries\nimport whois # pip install python-whois\nimport tldextract # pip install tldextract \nimport ipaddress # pip install ipaddress\nimport dns.query # pip install dnspython\nimport dns.resolver\nfrom dns.exception import DNSException",
"_____no_output_____"
]
],
[
[
"<img src=\"../img/logo_white_bkg_small.png\" align=\"right\" /> \n\n\n## Worksheet 5.1 - Feature Engineering: Malicious URL Detection using Machine Learning - Answers\n\nThis worksheet is a step-by-step guide on how to train a Machine Learning model that can detect malicious URLs. We will walk you through the process of transforming raw URL strings to Machine Learning features and creating a Decision Tree Classifer which you will use to determine whether a given URL is malicious or not. Once you have implemented the classifier, the worksheet will walk you through evaluating your model. \n\n### Overview 2 main steps:\n\n1. **Feature Engineering** - from raw URL strings to features using [pandas](http://pandas.pydata.org/pandas-docs/stable/) DataFrame, datetime and [numpy](http://www.numpy.org/) manipulations.\n2. **Machine Learning Classification** - predict whether a URL is malicious or not using a Decision Tree Classifier in [sklearn](http://scikit-learn.org/stable/) and evaluate model performance\n\nWe provide an additional notebook where you can see how to use \"Featureless Deep Learning\" to build such a classifier.\n\n\n### Data\n\nThe dataset was build from various different open source data sources. Computationally intensive tasks such as retrieving the creation time for each unique domain in the data set via [whois](https://pypi.python.org/pypi/python-whois) have already been performed beforehand. Some of the open source URLs came with the zone apex only, others didn't include the protocol, therefore, we uniformly removed the protocol (http:// or https://) and subdomain (e.g. www) from the URL string if applicable.\n\nBenign\n- Custom automated webscraping of [Alexa Top 1M](https://blog.majestic.com/development/majestic-million-csv-daily/) with recursive depth of scraping of level 1.\n\nMalicious\n- Various blacklists\n- [openphish](https://openphish.com/)\n- [phishtank](https://www.phishtank.com/)\n- [public GitHub faizann24](https://github.com/faizann24/Using-machine-learning-to-detect-malicious-URLs)\n- some more sources\n\nThe dataset is perfectly balanced (50% benign and 50% malicious). We emphasized on getting benign URLs with paths and not just the domain. Furthermore, depending on your environment you can choose between a smaller subset (```url_data_small.csv``` containing 4000 URLs balanced) or the full data set (```url_data_full.csv``` containing 87380 URLs balanced).\n",
"_____no_output_____"
]
],
[
[
"## Load data\nDATA_HOME = '../data/'\ndf = pd.read_csv(DATA_HOME + 'url_data_full.csv')\n# df = pd.read_csv(DATA_HOME + 'url_data_small.csv')\ndf.isIP = df.isIP.astype(int)\nprint(df.shape)\ndf.sample(n=5).head() # print a random sample of the DataFrame",
"(87380, 5)\n"
],
[
"df['isMalicious'].value_counts()",
"_____no_output_____"
]
],
[
[
"## Part 1 - Feature Engineering\n\n\nThe traditional approach is to hand-craft Machine Learning features. This can be the most tedious part and often requires extensive domain expertise and data wrangling skills.\n\nPrevious academic research on identifying malicious or suspicious URLs has focused on studying the usefulness of an exhausted list of candidate features. Here, we cover only a selection of some basic and most widely used features.\n\nThere are 4 main \"URL Features\" families:\n1. **BlackList Features**: Check if in any BlackList. BlackLists suffer from a high false negative rate, but can still be useful as a feature.\n2. **Lexical Features**: Using methods from Natural Language Processing. They capture the property of malicious URLs tending to \"look different\" from benign URLs. Therefore, lexical features quantify contextual information such as the length of the URL.\n3. **Host-based Features**: They quantify properties of the web site host and answer \"where\" the site is hosted, \"who\" owns it and \"how\" it is managed. API queries are needed for this type of features (WHOIS, DNS records). Some example features can be the date of registration, geolocation, autonomous system (AS) number, connection speed or time-to-live (TTL).\n4. **Content-based Features**: This is one of the less commonly used feature families as it requires the download of the entire web-page, hence execution of the potential malicious site, which can not only be not safe, but also increases the computational cost of deriving features. Features here can be HTML or JavaScript based.\n\nSource: Sahoo et al. 2017: [Malicious URL Detection using Machine Learning: A Survey](https://arxiv.org/pdf/1701.07179.pdf)\n\nIn this notebook, we focus on a selection of **lexical features** and **host-based features**, starting with the lexical ones in the subsequent code cell. The host-based features instructions will follow in the next markdown cell.\n\n### Feature Engineering Sub-Section A - Lexical Features\n\n\n**Selection of lexical features**:\n\n1. Length of URL [\"Length\"]\n2. Length of hostname/domain [\"LengthDomain]\n3. Count of digits [\"DigitsCount\"]\n4. Entropy of hostname/domain [\"EntropyDomain\"] - use ```H_entropy``` function provided \n5. Position (or index) of the first digit [\"FirstDigitIndex\"] - use ```firstDigitIndex``` function provided \n6. Bag-of-words - more details later\n\nWe provide a couple of helper functions. Please run the following function cell and then continue reading the next markdown cell with more details on how to derive those features. Have fun!\n\n",
"_____no_output_____"
]
],
[
[
"def H_entropy (x):\n # Calculate Shannon Entropy\n return entropy.shannon_entropy(x)\n\ndef firstDigitIndex( s ):\n for i, c in enumerate(s):\n if c.isdigit():\n return i + 1\n return 0",
"_____no_output_____"
]
],
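[
[
"A quick, illustrative look at what these helpers return on an arbitrary example string:",
"_____no_output_____"
],
[
"# illustrative check of the provided helpers on an arbitrary example string\nexample_domain = 'paypal-secure123.com'\nprint(H_entropy(example_domain), firstDigitIndex(example_domain))",
"_____no_output_____"
]
],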
[
[
"### Tasks - Sub-Section A - Lexical Features\n\nAppend features to the pandas 2D DataFrame ```df``` with a new column for each feature. Later, simply drop the columns that are not features. Please focus on ```[\"Length\"]```, ```[\"LengthDomain]```, ```[\"DigitsCount\"]```, ```[\"EntropyDomain\"]``` and ```[\"FirstDigitIndex\"]``` here. [pandas.Series.str](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.html), [pandas.Series.replace](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.replace.html) and [pandas.Series,apply](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.apply.html) and [tldextract](https://pypi.python.org/pypi/tldextract) can be very helpful to quickly derive those features. Functions you need to apply here are provided in above cell.\n\nFor the ```Bag-of-words``` see next instructions in next markdown cell...\n",
"_____no_output_____"
]
],
[
[
"# derive simple lexical features\ndf['Length'] = df.url.str.len()\ndf['LengthDomain'] = df.domain.str.len()\ndf['DigitsCount'] = df.url.str.count('[0-9]')\ndf['EntropyDomain'] = df.domain.apply(H_entropy)\ndf['FirstDigitIndex'] = df.url.apply(firstDigitIndex)\n\n# check intermediate 2D pandas DataFrame\nprint(len(df.columns))\ndf.sample(n=5)",
"10\n"
]
],
[
[
"### Tasks - Sub-Section A - Lexical Features (continued)\n\nThere are many different approaches of applying ```bag-of-words``` to URLs. Here we suggest the following approach:\n\n1. Extract the different portions of the URL (host names (domains), top-level-domains (tlds) [what is TLD](https://en.wikipedia.org/wiki/Top-level_domain), paths) and create separate pandas Series (or Python lists) using the [tldextract](https://pypi.python.org/pypi/tldextract) library.\n2. (Code for step 2 is provided) Find the top 20 tlds (e.g. ```com```, ```de```, ```ru``` etc) from the data. Then use [sklearn CountVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) to create ```bag-of-words``` with a custom vocabulary, here the top 20 tlds as parameter input. Use the ```.fit()``` method to train the CountVectorizer model (save the model in a variable, you will need this model later for real-time transformations of new URLs). After this the ```.transform()``` function is applied to the pandas Series or list of tlds. The resulting matrix is dense, therefore ```.toarray()``` is needed to get a regular numpy matrix. You will notice that this numpy matrix is very sparse, that is, contains a lot of zeros. The ```get_feature_names()``` is useful to get not only the vocabulary, but also to know which column of the matrix corresponds to which ```word```.\n3. Knowing procedures for step 2, please try to do something similar for the domains. However, choose an ```ngram``` approach via setting the following parameters for the CountVectorizer: ```analyzer='char', ngram_range=(3, 4), max_features=30```.\n4. (Code for step 4 is provided) Again, we provide you with the solution to applying a different CountVectorizer approach to the path using ```analyzer='word', tokenizer=custom_path_tokenizer, max_features=100``` as parameters.\n5. Feel free to try different approaches.\n\nAt each step the numpy matrix is converted to a pandas DataFrame and then concatenated to the previous one and so on. That way you can run one cell multiple times without re-concatenating to the original df which would throw errors. At the end simply replace the original df with the df that contains all bag-of-words features.\n",
"_____no_output_____"
]
],
[
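[
"# Minimal toy sketch (not part of the original assignment): how the CountVectorizer workflow described above\n# behaves with a fixed vocabulary, before it is applied to the real tld/domain/path Series below.\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ntoy_tlds = ['com', 'com', 'ru', 'de', 'xyz']  # 'xyz' is not in the vocabulary, so it maps to an all-zero row\ntoy_vectorizer = CountVectorizer(analyzer='word', vocabulary=['com', 'ru', 'de'])\ntoy_vectorizer = toy_vectorizer.fit(toy_tlds)\nprint(toy_vectorizer.get_feature_names())            # column order of the matrix\nprint(toy_vectorizer.transform(toy_tlds).toarray())  # sparse matrix -> regular numpy array",
"_____no_output_____"
],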
[
"def extract_path(url):\n return re.sub('.'.join([tldextract.extract(url).domain, tldextract.extract(url).suffix]), '', url)",
"_____no_output_____"
],
[
"domains = df.url.apply(lambda x: tldextract.extract(x).domain)\ntlds = df.url.apply(lambda x: tldextract.extract(x).suffix)\npaths = df.url.apply(extract_path)",
"_____no_output_____"
],
[
"n_tlds = 20\ntop_tlds = list(tlds.value_counts().head(n_tlds).keys())\ntop_tlds = [tld if tld is not '' else 'nan' for tld in top_tlds] # encode empty/missing tld as 'nan'",
"_____no_output_____"
],
[
"CountVectorizer_tlds = CountVectorizer(analyzer='word', vocabulary=top_tlds)\nCountVectorizer_tlds = CountVectorizer_tlds.fit(tlds)\nmatrix_dense_tlds = CountVectorizer_tlds.transform(tlds)\n\nprint(CountVectorizer_tlds.get_feature_names())\nprint(matrix_dense_tlds.shape)\nprint(sum(matrix_dense_tlds.toarray()))",
"['com', 'org', 'net', 'ru', 'co.uk', 'info', 'fr', 'biz', 'pl', 'top', 'us', 'cc', 'pt', 'tv', 'co.kr', 'su', 'ws', 'org.uk', 'club', 'pro']\n(87380, 20)\n[65135 6699 5198 3750 0 1136 901 427 430 345 340 214\n 183 147 0 135 90 0 77 66]\n"
],
[
"df_tlds = pd.DataFrame(matrix_dense_tlds.toarray(), columns=CountVectorizer_tlds.get_feature_names())\n# matrix_dense_tlds.toarray() converts dense matrix to a regular matrix, which will be sparse (a lot of zeros)\ndf1 = pd.concat([df, df_tlds],axis=1)\nprint(len(df1.columns))\ndf1.head()",
"30\n"
],
[
"CountVectorizer_domains = CountVectorizer(analyzer='char', ngram_range=(3, 4), max_features=30)\nCountVectorizer_domains = CountVectorizer_domains.fit(domains)\n\nmatrix_dense_domains = CountVectorizer_domains.transform(domains)\n\ndf_domains = pd.DataFrame(matrix_dense_domains.toarray(), columns=CountVectorizer_domains.get_feature_names())\ndf2 = pd.concat([df1, df_domains],axis=1)\nprint(len(df2.columns))\ndf2.head()",
"60\n"
],
[
"def custom_path_tokenizer(path): # input is a string for one path from one URL\n return list(filter(None, re.compile('[\\?\\=/\\._-]').split(path.lower())))",
"_____no_output_____"
],
[
"CountVectorizer_paths = CountVectorizer(analyzer='word', tokenizer=custom_path_tokenizer, max_features=100)\nCountVectorizer_paths = CountVectorizer_paths.fit(paths)\n\nmatrix_dense_paths = CountVectorizer_paths.transform(paths)\n\ndf_paths = pd.DataFrame(matrix_dense_paths.toarray(), columns=CountVectorizer_paths.get_feature_names())\ndf3 = pd.concat([df2, df_paths],axis=1)\nprint(len(df3.columns))\ndf3.head()",
"160\n"
]
],
[
[
"### Feature Engineering Sub-Section B - Host-based Features\n\n\nDerivation of host-based features often requires the use of APIs or querying information from some authoritative source. It took us 2 days to get all whois data for all of our unique domains (see ```domains_created_db.csv``` file). \n\n**Selection of host-based features**:\n\n1. Time delta between today's date and creation date ['DurationCreated'] (original whois code included at the end of the notebook)\n2. Check if it is an IP address ['isIP'] - already provided, no feature engineering needed \n3. (Time-to-live ['ttl'] - code to query an authoritative nameserver included at the end of the notebook, but not included in preprocessed data set)\n\n\n### Tasks - Sub-Section B - Host-based Features\n\nAppend features to the pandas 2D DataFrame ```df``` with a new column for each feature. Later, simply drop the columns that are not features. [pandas.to_datetime](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html) with ```errors='coerce'``` is easy to use to convert the ```WHOIS``` info ```[\"created\"]``` to a datetime data type. Make sure to also fillna with zeros! You can then simply subtract the creation date from today's date to derive the ```[\"DurationCreated\"]``` feature. [pandas.Series.dt.day](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.dt.day.html) can be handy to express the time delta in days. \n\nAfter all features have been added to the pandas 2D DataFrame, please drop all columns that are not features etc, here drop ```['url', 'created', 'domain']```.\n",
"_____no_output_____"
]
],
[
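[
"# Small sketch with toy data (assumption, not from the dataset): pd.to_datetime with errors='coerce' turns\n# unparseable creation dates into NaT; the resulting NaN durations can then be filled with 0 as suggested\n# above, before using .dt.days to express the age in days.\nimport datetime\nimport pandas as pd\n\ntoy_created = pd.Series(['2015-06-01', 'not a date', '2020-01-15'])\ntoy_created = pd.to_datetime(toy_created, errors='coerce')\ntoy_duration = (pd.to_datetime(datetime.date.today()) - toy_created).dt.days\nprint(toy_duration.fillna(0))",
"_____no_output_____"
],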
[
"df=df3\ndf.created = pd.to_datetime(df.created, errors='coerce')\ndf['DurationCreated'] = (pd.to_datetime(datetime.date.today()) - df.created).dt.days\n\n# check final 2D pandas DataFrame containing all final features and the target vector isMalicious\ndf.sample(n=5).head()",
"_____no_output_____"
],
[
"df_final = df\ndf_final = df_final.drop(['url', 'domain', 'created'], axis=1)\ndf_final.to_csv(DATA_HOME + 'url_features_final_df.csv', index=False)\ndf_final.sample(n=5).head()",
"_____no_output_____"
],
[
"# PySpark can't handle colNames with dots, Therefore let's export a modified version\ndf_spark = df_final\nnewColNames = [x.replace(\".\", \"\") for x in df_final.columns]\ndf_spark.columns = newColNames\ndf_spark.to_csv(DATA_HOME + 'url_features_final_df_spark.csv', index=False)\ndf_spark.sample(n=5).head()",
"_____no_output_____"
]
],
[
[
"#### Breakpoint: Load Features and Labels\n\nIf you got stuck in Part 1, please simply load the feature matrix we prepared for you, so you can move on to Part 2 and train a Decision Tree Classifier.",
"_____no_output_____"
]
],
[
[
"df_final = pd.read_csv(DATA_HOME + 'url_features_final_df.csv')\nprint(df_final.isMalicious.value_counts())\nprint(len(df_final.columns))\ndf_final.sample(n=5).head()",
"1 43690\n0 43690\nName: isMalicious, dtype: int64\n158\n"
],
[
"feature_names = list(df_final.columns)\nfeature_names.remove('isMalicious')",
"_____no_output_____"
],
[
"# Pickle certain variables, so they can be loaded again in part 2 to make new predictions\n# feature_names, CountVectorizer_tlds, CountVectorizer_domains, CountVectorizer_paths\nfrom six.moves import cPickle as pickle\n\ntmp_models = {\"feature_names\":feature_names, \"CountVectorizer_tlds\":CountVectorizer_tlds, \n \"CountVectorizer_domains\":CountVectorizer_domains, \"CountVectorizer_paths\":CountVectorizer_paths}\n\nfor key, value in tmp_models.items():\n\n with open(DATA_HOME + key + '.pickle', 'wb') as f:\n pickle.dump(value, f)\n \n# with open(DATA_HOME + key + '.pickle', 'rb') as f:\n# tmp_model = pickle.load(f)\n# print(tmp_model)",
"_____no_output_____"
],
[
"import dns.query\nimport dns.resolver\nfrom dns.exception import DNSException\n\ndef query_authoritative_ns (domain, log=lambda msg: None, ttl_only=True):\n\n default = dns.resolver.get_default_resolver()\n ns = default.nameservers[0]\n\n n = domain.split('.')\n\n for i in range(len(n), 0, -1):\n sub = '.'.join(n[i-1:])\n\n log('Looking up %s on %s' % (sub, ns))\n query = dns.message.make_query(sub, dns.rdatatype.NS)\n response = dns.query.udp(query, ns)\n\n rcode = response.rcode()\n if rcode != dns.rcode.NOERROR:\n if rcode == dns.rcode.NXDOMAIN:\n raise Exception('%s does not exist.' % (sub))\n else:\n raise Exception('Error %s' % (dns.rcode.to_text(rcode)))\n\n if len(response.authority) > 0:\n rrsets = response.authority\n elif len(response.additional) > 0:\n rrsets = [response.additional]\n else:\n rrsets = response.answer\n\n # Handle all RRsets, not just the first one\n for rrset in rrsets:\n for rr in rrset:\n if rr.rdtype == dns.rdatatype.SOA:\n print('Same server is authoritative for %s' % (sub))\n elif rr.rdtype == dns.rdatatype.A:\n ns = rr.items[0].address\n print('Glue record for %s: %s' % (rr.name, ns))\n elif rr.rdtype == dns.rdatatype.NS:\n authority = rr.target\n ns = default.query(authority).rrset[0].to_text()\n print('%s [%s] is authoritative for %s; ttl %i' % (authority, ns, sub, rrset.ttl))\n result = rrset\n if ttl_only:\n print(rrset)\n result = rrset.ttl\n else:\n # IPv6 glue records etc\n #log('Ignoring %s' % (rr))\n pass\n\n return result",
"_____no_output_____"
],
[
"query_authoritative_ns('www.gtkcyber.com')",
"m.gtld-servers.net. [192.55.83.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nh.gtld-servers.net. [192.54.112.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nk.gtld-servers.net. [192.52.178.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\na.gtld-servers.net. [192.5.6.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nd.gtld-servers.net. [192.31.80.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nf.gtld-servers.net. [192.35.51.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nj.gtld-servers.net. 
[192.48.79.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nb.gtld-servers.net. [192.33.14.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\ne.gtld-servers.net. [192.12.94.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nc.gtld-servers.net. [192.26.92.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nl.gtld-servers.net. [192.41.162.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\ng.gtld-servers.net. [192.42.93.30] is authoritative for com; ttl 169092\ncom. 169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\ni.gtld-servers.net. [192.43.172.30] is authoritative for com; ttl 169092\ncom. 
169092 IN NS m.gtld-servers.net.\ncom. 169092 IN NS h.gtld-servers.net.\ncom. 169092 IN NS k.gtld-servers.net.\ncom. 169092 IN NS a.gtld-servers.net.\ncom. 169092 IN NS d.gtld-servers.net.\ncom. 169092 IN NS f.gtld-servers.net.\ncom. 169092 IN NS j.gtld-servers.net.\ncom. 169092 IN NS b.gtld-servers.net.\ncom. 169092 IN NS e.gtld-servers.net.\ncom. 169092 IN NS c.gtld-servers.net.\ncom. 169092 IN NS l.gtld-servers.net.\ncom. 169092 IN NS g.gtld-servers.net.\ncom. 169092 IN NS i.gtld-servers.net.\nns-cloud-b1.googledomains.com. [216.239.32.107] is authoritative for gtkcyber.com; ttl 21600\ngtkcyber.com. 21600 IN NS ns-cloud-b1.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b2.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b3.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b4.googledomains.com.\nns-cloud-b2.googledomains.com. [216.239.34.107] is authoritative for gtkcyber.com; ttl 21600\ngtkcyber.com. 21600 IN NS ns-cloud-b1.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b2.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b3.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b4.googledomains.com.\nns-cloud-b3.googledomains.com. [216.239.36.107] is authoritative for gtkcyber.com; ttl 21600\ngtkcyber.com. 21600 IN NS ns-cloud-b1.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b2.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b3.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b4.googledomains.com.\nns-cloud-b4.googledomains.com. [216.239.38.107] is authoritative for gtkcyber.com; ttl 21600\ngtkcyber.com. 21600 IN NS ns-cloud-b1.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b2.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b3.googledomains.com.\ngtkcyber.com. 21600 IN NS ns-cloud-b4.googledomains.com.\nSame server is authoritative for www.gtkcyber.com\n"
],
[
"whois.query('gtkcyber.com').creation_date",
"_____no_output_____"
]
],
[
[
"## Visualizing the Features\nIn the last step, you're going to explore the feature space to see which features are potentially useful or not and of course whether there is too much noise to make predictions. \n\nFirst, using [Yellowbrick](http://pythonhosted.org/yellowbrick/examples/examples.html), create a Covariance ranking of the features. Since this section is about visualizing this information and not deriving it, please execute the cell below so that everyone will have the same data and get the same results.",
"_____no_output_____"
]
],
[
[
"## Load data\nDATA_HOME = '../data/'\ndf_final = pd.read_csv(DATA_HOME + 'url_features_final_df.csv')\nfeatures = df_final.loc[:,'isIP':]\ntarget = df_final['isMalicious']",
"_____no_output_____"
],
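[
"# Optional sketch (assumption, not part of the original instructions): besides the covariance ranking used in\n# the next cell, Yellowbrick's Rank2D also supports a scale-free Pearson correlation ranking, which can be\n# easier to read when features live on very different scales.\nfrom yellowbrick.features import Rank2D\n\nvisualizer_pearson = Rank2D(features=features.columns, algorithm='pearson')\nvisualizer_pearson.fit(features, target)\nvisualizer_pearson.transform(features)\nvisualizer_pearson.poof()",
"_____no_output_____"
],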
[
"visualizer = Rank2D(features=features.columns, algorithm='covariance')\nplt.figure(figsize=(16,10))\n\nvisualizer.fit(features, target) # Fit the data to the visualizer\nvisualizer.transform(features) # Transform the data\nvisualizer.poof() # Draw/show/poof the data",
"/Users/cgivre/anaconda3/lib/python3.7/site-packages/yellowbrick/features/rankd.py:262: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n X = X.as_matrix()\n"
]
],
[
[
"### What did you see?\nIf you did this correctly, you should see that most of the features are nearly useless. Next, pick 7 features yourself, either using the `feature_selection` functions in `scikit-learn` or by just picking them yourself, and create a pair plot using Seaborn to determine whether there are clear class boundaries between the classes in these features. ",
"_____no_output_____"
]
],
[
[
"#Gets an arrary of the best features in 1 step.\nbest_features = SelectKBest( score_func=chi2, k=7).fit_transform(features,target)\n\n#Get the feature names and indexes\nbest = SelectKBest( score_func=chi2, k=7).fit(features,target)\nfeature_names = pd.Series(features.columns)\nfeature_names[best.get_support()]",
"_____no_output_____"
],
[
"sns.pairplot(df_final[['isMalicious','Length', 'LengthDomain','DigitsCount', 'FirstDigitIndex', 'news.1', 'the.1', 'DurationCreated']], hue='isMalicious' )",
"/Users/cgivre/anaconda3/lib/python3.7/site-packages/statsmodels/nonparametric/kde.py:488: RuntimeWarning: invalid value encountered in true_divide\n binned = fast_linbin(X, a, b, gridsize) / (delta * nobs)\n/Users/cgivre/anaconda3/lib/python3.7/site-packages/statsmodels/nonparametric/kdetools.py:34: RuntimeWarning: invalid value encountered in double_scalars\n FAC1 = 2*(np.pi*bw/RANGE)**2\n/Users/cgivre/anaconda3/lib/python3.7/site-packages/numpy/core/fromnumeric.py:83: RuntimeWarning: invalid value encountered in reduce\n return ufunc.reduce(obj, axis, dtype, out, **passkwargs)\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
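"code",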
"code"
],
[
"markdown"
],
[
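"code",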
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
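"code",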
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
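"code",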
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74f14c63801c24adcf35df09ec410bc85bdb3ad | 80,669 | ipynb | Jupyter Notebook | evaluacion_JudithCallisaya.ipynb | Jud18/training-python-novice | 7bc3c342583ed4c67ce25bc5e4accf00a4b92ddb | [
"BSD-3-Clause"
] | null | null | null | evaluacion_JudithCallisaya.ipynb | Jud18/training-python-novice | 7bc3c342583ed4c67ce25bc5e4accf00a4b92ddb | [
"BSD-3-Clause"
] | null | null | null | evaluacion_JudithCallisaya.ipynb | Jud18/training-python-novice | 7bc3c342583ed4c67ce25bc5e4accf00a4b92ddb | [
"BSD-3-Clause"
] | null | null | null | 94.349708 | 44,240 | 0.76685 | [
[
[
"## Evaluación realizada\nCompleta lo que falta.\n",
"_____no_output_____"
]
],
[
[
"# instalacion\n!pip install pandas\n!pip install matplotlib\n!pip install pandas-datareader",
"Requirement already satisfied: pandas in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (1.3.3)\nRequirement already satisfied: numpy>=1.17.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas) (1.21.2)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas) (2021.1)\nRequirement already satisfied: six>=1.5 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from python-dateutil>=2.7.3->pandas) (1.16.0)\nRequirement already satisfied: matplotlib in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (3.4.3)\nRequirement already satisfied: numpy>=1.16 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (1.21.2)\nRequirement already satisfied: python-dateutil>=2.7 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (2.8.2)\nRequirement already satisfied: pyparsing>=2.2.1 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: pillow>=6.2.0 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (8.3.2)\nRequirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from matplotlib) (1.3.2)\nRequirement already satisfied: six in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from cycler>=0.10->matplotlib) (1.16.0)\nRequirement already satisfied: pandas-datareader in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (0.10.0)\nRequirement already satisfied: lxml in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas-datareader) (4.6.3)\nRequirement already satisfied: requests>=2.19.0 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas-datareader) (2.25.1)\nRequirement already satisfied: pandas>=0.23 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas-datareader) (1.3.3)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas>=0.23->pandas-datareader) (2.8.2)\nRequirement already satisfied: numpy>=1.17.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas>=0.23->pandas-datareader) (1.21.2)\nRequirement already satisfied: pytz>=2017.3 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from pandas>=0.23->pandas-datareader) (2021.1)\nRequirement already satisfied: six>=1.5 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from python-dateutil>=2.7.3->pandas>=0.23->pandas-datareader) (1.16.0)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from requests>=2.19.0->pandas-datareader) (2.10)\nRequirement already satisfied: chardet<5,>=3.0.2 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from requests>=2.19.0->pandas-datareader) 
(4.0.0)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from requests>=2.19.0->pandas-datareader) (2021.5.30)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\usuario\\anaconda3\\envs\\learning python\\lib\\site-packages (from requests>=2.19.0->pandas-datareader) (1.26.6)\n"
],
[
"# 1 importa las bibliotecas\nimport pandas as pd\nimport pandas_datareader.data as web\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# 2. Establecer una fecha de inicio \"2020-01-01\" y una fecha de finalización \"2021-08-31\"\n\nstart_date = \"2020-01-01\"\n\nend_date = \"2021-08-31\"",
"_____no_output_____"
],
[
"# 3.Usar el método del lector de datos para almacenar los datos\n# del precio de las acciones de facebook ('FB') en un DataFrame llamado data.\n# https://finance.yahoo.com/quote/FB/history?p=FB\ndata = web.DataReader(name='FB', data_source='yahoo', start=start_date, end=end_date)\ndata\n# La salida se ve igual a la que leemos en cualquier archivo CSV.",
"_____no_output_____"
],
[
"# 4. Explica el resultado.\nprint(\"El volumen en la Bolsa se refiere a la cantidad de títulos negociados (equivalente en dinero) de una acción en un periodo determinado.\")\nprint(\"El volumen nos indica el interés de los inversores por una acción concreta. Atendiendo al volumen medio, existen valores de los que se negocian millones de títulos al día, mientras podemos encontrar otros de los que apenas se negocian unos cientos. Es habitual que las acciones de grandes empresas, o de gran capitalización, sean más negociadas que las de empresas más pequeñas o de baja capitalización.\")\nprint(\"Lo que realmente nos va a interesar a la hora de operar en el mercado son los cambios en el volumen de una acción. Un incremento del volumen nos dirá que hay más agentes en el mercado interesados en negociar la acción, o bien hay agentes que están operando por cuantías mayores.\")\nprint(\"Para los analistas técnicos el volumen es muy importante, ya que puede dar mayor validez o credibilidad a la detección las señales técnicas de compra o venta de una acción. A priori, una tendencia será más fuerte cuando el volumen va creciendo, ya que nos indica que el dinero que entra en el mercado está presionando el precio cada vez más, al alza en una tendencia alcista o a la baja en una tendencia bajista.\")\n\nprint(\"También es interesante ver qué precios concentran mayor volumen durante la sesión, con independencia del momento de la misma en el que se hayan producido las negociaciones. Esto nos puede indicar que existe cierta propensión a comprar o vender la acción a determinados precios, información que podemos utilizar para posicionar nuestras órdenes en el mercado.\") \nprint(\"Los precios de concentración de mayor volumen pueden tender a formar soportes y resistencias. El volumen por precio también nos puede dar pistas sobre los precios a los que están comprando o vendiendo las manos fuertes del mercado.\")",
"El volumen en la Bolsa se refiere a la cantidad de títulos negociados (equivalente en dinero) de una acción en un periodo determinado.\nEl volumen nos indica el interés de los inversores por una acción concreta. Atendiendo al volumen medio, existen valores de los que se negocian millones de títulos al día, mientras podemos encontrar otros de los que apenas se negocian unos cientos. Es habitual que las acciones de grandes empresas, o de gran capitalización, sean más negociadas que las de empresas más pequeñas o de baja capitalización.\nLo que realmente nos va a interesar a la hora de operar en el mercado son los cambios en el volumen de una acción. Un incremento del volumen nos dirá que hay más agentes en el mercado interesados en negociar la acción, o bien hay agentes que están operando por cuantías mayores.\nPara los analistas técnicos el volumen es muy importante, ya que puede dar mayor validez o credibilidad a la detección las señales técnicas de compra o venta de una acción. A priori, una tendencia será más fuerte cuando el volumen va creciendo, ya que nos indica que el dinero que entra en el mercado está presionando el precio cada vez más, al alza en una tendencia alcista o a la baja en una tendencia bajista.\nTambién es interesante ver qué precios concentran mayor volumen durante la sesión, con independencia del momento de la misma en el que se hayan producido las negociaciones. Esto nos puede indicar que existe cierta propensión a comprar o vender la acción a determinados precios, información que podemos utilizar para posicionar nuestras órdenes en el mercado.\nLos precios de concentración de mayor volumen pueden tender a formar soportes y resistencias. El volumen por precio también nos puede dar pistas sobre los precios a los que están comprando o vendiendo las manos fuertes del mercado.\n"
]
],
[
[
"\n* Entender los movimientos del precio, si suben o bajan.\n* Los precios de las acciones se mueven constantemente a lo largo del día de trading a medida que la oferta y la demanda de acciones cambian (precio mas alto o mas bajo). Cuando el mercado cierra, se registra el precio final de la acción.\n* EL precio de Apertura: Precio con el que un Valor inicia sus transacciones en una sesión bursátil. Normalmente este precio no tiene gran diferencia con el precio de cierre (salvo algun acontecimiento importante).\n* El precio de cierre: Es la última cotización que registró durante el día en el mercado bursátil de un determinado título financiero. Nos podemos referir a la acción de una empresa, un índice, la moneda local u otro activo similar.\n\n* El precio de cierre ajustado representa el precio de cierre preciso basado en acciones corporativas. Por ejemplo, si el precio de cierre de las acciones de la empresa ABC era de USD 21.90 pero se pagaron dividendos de 100 centimos por accion, el precio de cierre se ajustara a USD 20.90.\n\n* El volumen mide la cantidad de acciones que se han comprado y vendido en un periodo determinado para una accion en concreto en este caso (FB). Se debe analizar el volumen en relacion a los volumenes anteriores, si suben o bajan.",
"_____no_output_____"
]
],
[
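[
"# Optional sketch (assumption, not part of the original exercise): 'Adj Close' differs from 'Close' when\n# dividends or splits occurred, so plotting both columns makes the adjustment described above visible.\nimport matplotlib.pyplot as plt\n\ndata[['Close', 'Adj Close']].plot(title='FB: Close vs Adj Close')\nplt.show()",
"_____no_output_____"
],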
[
"# 5. Muestre un resumen de la información básica sobre este DataFrame y sus datos \n# use la funcion dataFrame.info() y dataFrame.describe()\ndata.describe()",
"_____no_output_____"
],
[
"# 6. Devuelve las primeras 5 filas del DataFrame con dataFrame.head() o dataFrame.iloc[]\ndata.head(5)",
"_____no_output_____"
],
[
"# 7. Seleccione solo las columnas 'Open','Close' y 'Volume' del DataFrame con dataFrame.loc\ndata.loc[:, ['Open', 'Close', 'Volume']]",
"_____no_output_____"
],
[
"# Ver el rango de lo datos\ndata.index.min(), data.index.max()",
"_____no_output_____"
],
[
"# 8. Ahora grafica los datos de \"Close\" usando la biblioteca matplotlib en Python, \n# 9. Agrega title, marker, linestyle y color para mejorar la visualizacion\n\nclose = data['Close']\nax = close.plot(title='Facebook', linestyle='-', color='c')\nax.set_xlabel('Date')\nax.set_ylabel('Close')\nax.grid() #opcional\nplt.show()\n",
"_____no_output_____"
],
[
"# 10. Explica la grafica sencilla de linea\nprint(\"Un gráfico de precios es útil porque nos ayuda a identificar, mediante niveles de referencia de soporte y resistencia, el momento más adecuado para comprar o vender Acciones en el Mercado de Capitales, mejor conocido como de Renta Variable. \")\nprint(\"Económicamente, el comportamiento de los precios que observamos en el gráfico es reflejo de cambios en la oferta y la demanda, ya que cuando la oferta excede a la demanda los precios tienden a caer, mientras que cuando la demanda supera a la oferta los precios tienden a subir.\")\nprint(\"Por lo tanto, a través del gráfico de precios estaremos obteniendo una lectura objetiva del verdadero sentimiento del mercado, lo que sustentará una postura de compra “Bullish” o de venta “Bearish”. Además, debemos considerar que el comportamiento de los precios se desarrolla de tres maneras, en tendencia alcista, en tendencia bajista y en modo lateral o “trading”.\")\nprint(\"La grafica de lineas es un tipo de gráfico que solamente considera el precio de cierre de cada periodo (Días, semana, mes, etc.), y que por lo tanto nos brinda poca información o de manera incompleta.\")",
"Un gráfico de precios es útil porque nos ayuda a identificar, mediante niveles de referencia de soporte y resistencia, el momento más adecuado para comprar o vender Acciones en el Mercado de Capitales, mejor conocido como de Renta Variable. \nEconómicamente, el comportamiento de los precios que observamos en el gráfico es reflejo de cambios en la oferta y la demanda, ya que cuando la oferta excede a la demanda los precios tienden a caer, mientras que cuando la demanda supera a la oferta los precios tienden a subir.\nPor lo tanto, a través del gráfico de precios estaremos obteniendo una lectura objetiva del verdadero sentimiento del mercado, lo que sustentará una postura de compra “Bullish” o de venta “Bearish”. Además, debemos considerar que el comportamiento de los precios se desarrolla de tres maneras, en tendencia alcista, en tendencia bajista y en modo lateral o “trading”.\nLa grafica de lineas es un tipo de gráfico que solamente considera el precio de cierre de cada periodo (Días, semana, mes, etc.), y que por lo tanto nos brinda poca información o de manera incompleta.\n"
]
],
[
[
"* Un gráfico de cierre es un tipo de gráfico que se utiliza normalmente para ilustrar los movimientos en el precio de un instrumento financiero a lo largo del tiempo.\n* El gráfico muestra los movimientos de la cotización de Facebook desde el 01/01/2020 hasta el 31/08/2021. La línea une los precios de cierre diarios, es decir, se relaciona con el precio al que cierra un acción en una jornada o rueda de bolsa. \n* Conocer el precio de cierre es importante porque este es el precio con el que iniciará la siguiente subasta de apertura de la cotización de la acción.",
"_____no_output_____"
],
[
"*Fuente:https://finance.yahoo.com/quote/FB/history?p=FB",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
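"code",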
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74f1bf234dde4d8c2073b7d61cdfa6a45653ed3 | 102,954 | ipynb | Jupyter Notebook | matplotlib/.ipynb_checkpoints/matplotlib 05. Pie chart-checkpoint.ipynb | ikelee22/pythonlib | cbf0faf548dfc35d799898178bf7e8c3461e5776 | [
"MIT"
] | null | null | null | matplotlib/.ipynb_checkpoints/matplotlib 05. Pie chart-checkpoint.ipynb | ikelee22/pythonlib | cbf0faf548dfc35d799898178bf7e8c3461e5776 | [
"MIT"
] | null | null | null | matplotlib/.ipynb_checkpoints/matplotlib 05. Pie chart-checkpoint.ipynb | ikelee22/pythonlib | cbf0faf548dfc35d799898178bf7e8c3461e5776 | [
"MIT"
] | null | null | null | 358.724739 | 28,984 | 0.937788 | [
[
[
"<img src='./img/intel-logo.jpg' width=40%, Fig1> \n\n# Pie chart \n<font size=5><b>05. 파이차트 <b></font>\n\n<div align='right'>성 민 석 (Minsuk Sung)</div>\n<div align='right'>류 회 성 (Hoesung Ryu)</div>\n\n<img src='./img/matplotlib_logo.png' width=50%, Fig2> \n\n\n---",
"_____no_output_____"
],
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#간단한-파이차트그리기\" data-toc-modified-id=\"간단한-파이차트그리기-1\"><span class=\"toc-item-num\">1 </span>간단한 파이차트그리기</a></span></li><li><span><a href=\"#파이차트-스타일\" data-toc-modified-id=\"파이차트-스타일-2\"><span class=\"toc-item-num\">2 </span>파이차트 스타일</a></span></li><li><span><a href=\"#파이차트-퍼센트와-같이-표시하기\" data-toc-modified-id=\"파이차트-퍼센트와-같이-표시하기-3\"><span class=\"toc-item-num\">3 </span>파이차트 퍼센트와 같이 표시하기</a></span></li><li><span><a href=\"#파이차트-Explode\" data-toc-modified-id=\"파이차트-Explode-4\"><span class=\"toc-item-num\">4 </span>파이차트 Explode</a></span></li></ul></div>",
"_____no_output_____"
],
[
"## 간단한 파이차트그리기 \n\npie 차트는 `matplotlib.pyplot` 모듈의 `pie 함수`를 사용해 그릴 수 있습니다.\n\npie 함수의 첫 번째 인자는 각 범주가 데이터에서 차지하는 비율을 뜻하며, labels를 통해 범주를 전달할 수 있습니다. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"exp_vals = [1400,600,300,410,250]\nexp_labels = [\"Home Rent\",\"Food\",\"Phone/Internet Bill\",\"Car \",\"Other Utilities\"]\nplt.pie(exp_vals,labels=exp_labels)",
"_____no_output_____"
]
],
[
[
"## 파이차트 스타일 \n\n- ` axis('equal)` 를 통해 완벽한 원형으로 파이차트를 표시 할 수 있습니다. \n- `shadow=True` 를 통해 pie 차트에 그림자를 설정할 수 있습니다.\n- `startangle=` 를 통해 시작각도를 설정 할 수 있습니다.",
"_____no_output_____"
]
],
[
[
"plt.pie(exp_vals,labels=exp_labels, shadow=True)\nplt.axis(\"equal\")\nplt.show()",
"_____no_output_____"
],
[
"plt.pie(exp_vals,labels=exp_labels, shadow=True,startangle=45)\nplt.axis(\"equal\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 파이차트 퍼센트와 같이 표시하기 \n\n`autopct` 명령어를 통하여 퍼센트를 표시 할수 있으며, 소수점 자리수도 설정 할 수 있습니다.",
"_____no_output_____"
]
],
[
[
"plt.pie(exp_vals,\n labels=exp_labels,\n shadow=True,\n autopct='%1.1f%%', # 퍼센트 표시하기\n radius=1.5)\nplt.axis(\"equal\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 파이차트 Explode\n\n`explode` 명령어로 파이조각이 돌출되는 크기를 설정 할 수있으며, 0이면 돌출되지 않습니다. ",
"_____no_output_____"
]
],
[
[
"plt.axis(\"equal\")\nplt.pie(exp_vals,\n labels=exp_labels,\n shadow=True,\n autopct='%1.1f%%',\n radius=1.5,\n explode=[0,0,0,0.1,0.2],\n startangle=45)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74f3675a9cf2b69faedbe962b17fe829e9e48d5 | 14,254 | ipynb | Jupyter Notebook | pandas_practice.ipynb | jebouchard/School_District-_Analysis | 0173b3bf597787542f40df74cb8a386176b2aee5 | [
"MIT"
] | null | null | null | pandas_practice.ipynb | jebouchard/School_District-_Analysis | 0173b3bf597787542f40df74cb8a386176b2aee5 | [
"MIT"
] | null | null | null | pandas_practice.ipynb | jebouchard/School_District-_Analysis | 0173b3bf597787542f40df74cb8a386176b2aee5 | [
"MIT"
] | null | null | null | 27.517375 | 386 | 0.42781 | [
[
[
"# List of high schools\nhigh_schools = [\"Hernandez High School\", \"Figueroa High School\",\n \"Wilson High School\",\"Wright High School\"]",
"_____no_output_____"
],
[
"for school in high_schools:\n print(school)",
"Hernandez High School\nFigueroa High School\nWilson High School\nWright High School\n"
],
[
"# A dictionary of high schools and the type of school.\nhigh_school_types = [{\"High School\": \"Griffin\", \"Type\":\"District\"},\n {\"High School\": \"Figueroa\", \"Type\": \"District\"},\n {\"High School\": \"Wilson\", \"Type\": \"Charter\"},\n {\"High School\": \"Wright\", \"Type\": \"Charter\"}]",
"_____no_output_____"
],
[
"print(high_school_types)",
"[{'High School': 'Griffin', 'Type': 'District'}, {'High School': 'Figueroa', 'Type': 'District'}, {'High School': 'Wilson', 'Type': 'Charter'}, {'High School': 'Wright', 'Type': 'Charter'}]\n"
],
[
"# List of high schools\nhigh_schools = [\"Huang High School\", \"Figueroa High School\", \"Shelton High School\", \"Hernandez High School\",\"Griffin High School\",\"Wilson High School\", \"Cabrera High School\", \"Bailey High School\", \"Holden High School\", \"Pena High School\", \"Wright High School\",\"Rodriguez High School\", \"Johnson High School\", \"Ford High School\", \"Thomas High School\"]",
"_____no_output_____"
],
[
"# Add the Pandas dependency.\nimport pandas as pd",
"_____no_output_____"
],
[
"# Create a Pandas Series from a list.\nschool_series = pd.Series(high_schools)\nschool_series",
"_____no_output_____"
],
[
"# A dictionary of high schools\nhigh_school_dicts = [{\"School ID\": 0, \"school_name\": \"Huang High School\", \"type\": \"District\"},\n {\"School ID\": 1, \"school_name\": \"Figueroa High School\", \"type\": \"District\"},\n {\"School ID\": 2, \"school_name\":\"Shelton High School\", \"type\": \"Charter\"},\n {\"School ID\": 3, \"school_name\":\"Hernandez High School\", \"type\": \"District\"},\n {\"School ID\": 4, \"school_name\":\"Griffin High School\", \"type\": \"Charter\"}] ",
"_____no_output_____"
],
[
"school_df = pd.DataFrame(high_school_dicts)\nschool_df",
"_____no_output_____"
],
[
"# Three separate lists of information on high schools\nschool_id = [0, 1, 2, 3, 4]\n\nschool_name = [\"Huang High School\", \"Figueroa High School\",\n\"Shelton High School\", \"Hernandez High School\",\"Griffin High School\"]\n\ntype_of_school = [\"District\", \"District\", \"Charter\", \"District\",\"Charter\"]",
"_____no_output_____"
],
[
"# Initialize a new DataFrame.\nschools_df = pd.DataFrame()",
"_____no_output_____"
],
[
"# Add the lists to a new DataFrame.\nschools_df[\"School ID\"] = school_id\nschools_df[\"School Name\"] = school_name\nschools_df[\"Type\"] = type_of_school\n\n# Print the DataFrame.\nschools_df",
"_____no_output_____"
],
[
"# Create a dictionary of information on high schools.\nhigh_schools_dict = {'School ID': school_id, 'school_name':school_name, 'type':type_of_school}",
"_____no_output_____"
],
[
"high_schools_df = pd.DataFrame(high_schools_dict)\nhigh_schools_df",
"_____no_output_____"
],
[
"school_df.columns",
"_____no_output_____"
],
[
"school_df.index",
"_____no_output_____"
],
[
"school_df.values",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74f3fa3fbf18d86ef6721758b37433458fb559e | 56,937 | ipynb | Jupyter Notebook | Convolutional Neural Networks/Convolution model-Step by Step-v2.ipynb | xuxingya/deep-learning-coursera | 1c2a2da974afab132d809449c99ad87cf8839855 | [
"MIT"
] | 1 | 2018-03-29T05:25:59.000Z | 2018-03-29T05:25:59.000Z | Convolutional Neural Networks/Convolution model-Step by Step-v2.ipynb | xuxingya/deep-learning-coursera | 1c2a2da974afab132d809449c99ad87cf8839855 | [
"MIT"
] | null | null | null | Convolutional Neural Networks/Convolution model-Step by Step-v2.ipynb | xuxingya/deep-learning-coursera | 1c2a2da974afab132d809449c99ad87cf8839855 | [
"MIT"
] | null | null | null | 41.988938 | 5,306 | 0.561744 | [
[
[
"# Convolutional Neural Networks: Step by Step\n\nWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \n\n**Notation**:\n- Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n\n- Superscript $(i)$ denotes an object from the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n \n \n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n \n \n- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n\nWe assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!",
"_____no_output_____"
],
[
"## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"_____no_output_____"
]
],
[
[
"## 2 - Outline of the Assignment\n\nYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:\n\n- Convolution functions, including:\n - Zero Padding\n - Convolve window \n - Convolution forward\n - Convolution backward (optional)\n- Pooling functions, including:\n - Pooling forward\n - Create mask \n - Distribute value\n - Pooling backward (optional)\n \nThis notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n\n<img src=\"images/model.png\" style=\"width:800px;height:300px;\">\n\n**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. ",
"_____no_output_____"
],
[
"## 3 - Convolutional Neural Networks\n\nAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. \n\n<img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\n\nIn this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. ",
"_____no_output_____"
],
[
"### 3.1 - Zero-Padding\n\nZero-padding adds zeros around the border of an image:\n\n<img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>\n\nThe main benefits of padding are the following:\n\n- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n\n- It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.\n\n**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:\n```python\na = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))\n```",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line)\n X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)\n ### END CODE HERE ###\n \n return X_pad",
"_____no_output_____"
],
[
"np.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\nx_pad = zero_pad(x, 2)\nprint (\"x.shape =\", x.shape)\nprint (\"x_pad.shape =\", x_pad.shape)\nprint (\"x[1,1] =\", x[1,1])\nprint (\"x_pad[1,1] =\", x_pad[1,1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0,:,:,0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0,:,:,0])",
"x.shape = (4, 3, 3, 2)\nx_pad.shape = (4, 7, 7, 2)\nx[1,1] = [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\nx_pad[1,1] = [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **x.shape**:\n </td>\n <td>\n (4, 3, 3, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x_pad.shape**:\n </td>\n <td>\n (4, 7, 7, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x[1,1]**:\n </td>\n <td>\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\n </td>\n </tr>\n <tr>\n <td>\n **x_pad[1,1]**:\n </td>\n <td>\n [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 3.2 - Single step of convolution \n\nIn this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: \n\n- Takes an input volume \n- Applies a filter at every position of the input\n- Outputs another volume (usually of different size)\n\n<img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\n\nIn a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. \n\nLater in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \n\n**Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice and W. Do not add the bias yet.\n s = np.multiply(a_slice_prev, W)\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z = Z + float(b)\n ### END CODE HERE ###\n\n return Z",
"_____no_output_____"
],
[
"np.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)",
"Z = -6.99908945068\n"
]
],
[
[
"**Expected Output**:\n<table>\n <tr>\n <td>\n **Z**\n </td>\n <td>\n -6.99908945068\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 3.3 - Convolutional Neural Networks - Forward pass\n\nIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: \n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n**Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. \n\n**Hint**: \n1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\n```python\na_slice_prev = a_prev[0:2,0:2,:]\n```\nThis will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.\n2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.\n\n<img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>\n\n\n**Reminder**:\nThe formulas relating the output shape of the convolution to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_C = \\text{number of filters used in the convolution}$$\n\nFor this exercise, we won't worry about vectorization, and will just implement everything with for-loops.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n \n Arguments:\n A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n \n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from A_prev's shape (≈1 line) \n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape (≈1 line)\n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n pad = hparameters['pad']\n \n # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)\n n_H = int((n_H_prev - f + 2 * pad) / stride) + 1\n n_W = int((n_W_prev - f + 2 * pad) / stride) + 1\n \n # Initialize the output volume Z with zeros. (≈1 line)\n Z = np.zeros((m, n_H, n_W, n_C))\n \n # Create A_prev_pad by padding A_prev\n A_prev_pad = zero_pad(A_prev, pad)\n \n for i in range(m): # loop over the batch of training examples\n a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation\n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over channels (= #filters) of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[...,c], b[...,c])\n \n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(10,4,4,3)\nW = np.random.randn(2,2,3,8)\nb = np.random.randn(1,1,1,8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\", np.mean(Z))\nprint(\"Z[3,2,1] =\", Z[3,2,1])\nprint(\"cache_conv[0][1][2][3] =\", cache_conv[0][1][2][3])",
"Z's mean = 0.0489952035289\nZ[3,2,1] = [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\ncache_conv[0][1][2][3] = [-0.20075807 0.18656139 0.41005165]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Z's mean**\n </td>\n <td>\n 0.0489952035289\n </td>\n </tr>\n <tr>\n <td>\n **Z[3,2,1]**\n </td>\n <td>\n [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\n </td>\n </tr>\n <tr>\n <td>\n **cache_conv[0][1][2][3]**\n </td>\n <td>\n [-0.20075807 0.18656139 0.41005165]\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"Finally, CONV layer should also contain an activation, in which case we would add the following line of code:\n\n```python\n# Convolve the window to get back one output neuron\nZ[i, h, w, c] = ...\n# Apply activation\nA[i, h, w, c] = activation(Z[i, h, w, c])\n```\n\nYou don't need to do it here. \n",
"_____no_output_____"
],
[
"## 4 - Pooling layer \n\nThe pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n\n- Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n\n- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n\n<table>\n<td>\n<img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n<td>\n\n<td>\n<img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n<td>\n</table>\n\nThese pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. \n\n### 4.1 - Forward Pooling\nNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. \n\n**Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.\n\n**Reminder**:\nAs there's no padding, the formulas binding the output shape of the pooling to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_C = n_{C_{prev}}$$",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n \n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n \"\"\"\n \n # Retrieve dimensions from the input shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve hyperparameters from \"hparameters\"\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n \n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - f) / stride)\n n_W = int(1 + (n_W_prev - f) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_H, n_W, n_C)) \n \n ### START CODE HERE ###\n for i in range(m): # loop over the training examples\n for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n for c in range (n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n \n # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. Use np.max/np.mean.\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n ### END CODE HERE ###\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(2, 4, 4, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A =\", A)\nprint()\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A =\", A)",
"mode = max\nA = [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\nmode = average\nA = [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n"
]
],
[
[
"**Expected Output:**\n<table>\n\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\n </td>\n </tr>\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. \n\nThe remainer of this notebook is optional, and will not be graded.\n",
"_____no_output_____"
],
[
"## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)\n\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \n\nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.\n\n### 5.1 - Convolutional layer backward pass \n\nLet's start by implementing the backward pass for a CONV layer. \n\n#### 5.1.1 - Computing dA:\nThis is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n\n$$ dA += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\n\nWhere $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\nda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n```\n\n#### 5.1.2 - Computing dW:\nThis is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n\n$$ dW_c += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^ {n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\n\nWhere $a_{slice}$ corresponds to the slice which was used to generate the acitivation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n```\n\n#### 5.1.3 - Computing db:\n\nThis is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n\n$$ db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\n\nAs you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndb[:,:,:,c] += dZ[i, h, w, c]\n```\n\n**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above. ",
"_____no_output_____"
]
],
[
[
"def conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n \n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n \n Returns:\n dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve information from \"cache\"\n (A_prev, W, b, hparameters) = None\n \n # Retrieve dimensions from A_prev's shape\n (m, n_H_prev, n_W_prev, n_C_prev) = None\n \n # Retrieve dimensions from W's shape\n (f, f, n_C_prev, n_C) = None\n \n # Retrieve information from \"hparameters\"\n stride = None\n pad = None\n \n # Retrieve dimensions from dZ's shape\n (m, n_H, n_W, n_C) = None\n \n # Initialize dA_prev, dW, db with the correct shapes\n dA_prev = None \n dW = None\n db = None\n\n # Pad A_prev and dA_prev\n A_prev_pad = None\n dA_prev_pad = None\n \n for i in range(None): # loop over the training examples\n \n # select ith training example from A_prev_pad and dA_prev_pad\n a_prev_pad = None\n da_prev_pad = None\n \n for h in range(None): # loop over vertical axis of the output volume\n for w in range(None): # loop over horizontal axis of the output volume\n for c in range(None): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\"\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Use the corners to define the slice from a_prev_pad\n a_slice = None\n\n # Update gradients for the window and the filter's parameters using the code formulas given above\n da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None\n dW[:,:,:,c] += None\n db[:,:,:,c] += None\n \n # Set the ith training example's dA_prev to the unpaded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n dA_prev[i, :, :, :] = None\n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"np.random.seed(1)\ndA, dW, db = conv_backward(Z, cache_conv)\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", np.mean(db))",
"_____no_output_____"
]
],
[
[
"** Expected Output: **\n<table>\n <tr>\n <td>\n **dA_mean**\n </td>\n <td>\n 1.45243777754\n </td>\n </tr>\n <tr>\n <td>\n **dW_mean**\n </td>\n <td>\n 1.72699145831\n </td>\n </tr>\n <tr>\n <td>\n **db_mean**\n </td>\n <td>\n 7.83923256462\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"## 5.2 Pooling layer - backward pass\n\nNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. \n\n### 5.2.1 Max pooling - backward pass \n\nBefore jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: \n\n$$ X = \\begin{bmatrix}\n1 && 3 \\\\\n4 && 2\n\\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n0 && 0 \\\\\n1 && 0\n\\end{bmatrix}\\tag{4}$$\n\nAs you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. \n\n**Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. \nHints:\n- [np.max()]() may be helpful. It computes the maximum of an array.\n- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:\n```\nA[i,j] = True if X[i,j] = x\nA[i,j] = False if X[i,j] != x\n```\n- Here, you don't need to consider cases where there are several maxima in a matrix.",
"_____no_output_____"
]
],
[
[
"def create_mask_from_window(x):\n \"\"\"\n Creates a mask from an input matrix x, to identify the max entry of x.\n \n Arguments:\n x -- Array of shape (f, f)\n \n Returns:\n mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n mask = None\n ### END CODE HERE ###\n \n return mask",
"_____no_output_____"
],
[
"np.random.seed(1)\nx = np.random.randn(2,3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)",
"_____no_output_____"
]
],
[
[
"**Expected Output:** \n\n<table> \n<tr> \n<td>\n\n**x =**\n</td>\n\n<td>\n\n[[ 1.62434536 -0.61175641 -0.52817175] <br>\n [-1.07296862 0.86540763 -2.3015387 ]]\n\n </td>\n</tr>\n\n<tr> \n<td>\n**mask =**\n</td>\n<td>\n[[ True False False] <br>\n [False False False]]\n</td>\n</tr>\n\n\n</table>",
"_____no_output_____"
],
[
"Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. ",
"_____no_output_____"
],
[
"### 5.2.2 - Average pooling - backward pass \n\nIn max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.\n\nFor example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n$$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n1/4 && 1/4 \\\\\n1/4 && 1/4\n\\end{bmatrix}\\tag{5}$$\n\nThis implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \n\n**Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)",
"_____no_output_____"
]
],
[
[
"def distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from shape (≈1 line)\n (n_H, n_W) = None\n \n # Compute the value to distribute on the matrix (≈1 line)\n average = None\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n a = None\n ### END CODE HERE ###\n \n return a",
"_____no_output_____"
],
[
"a = distribute_value(2, (2,2))\nprint('distributed value =', a)",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\n<table> \n<tr> \n<td>\ndistributed_value =\n</td>\n<td>\n[[ 0.5 0.5]\n<br\\> \n[ 0.5 0.5]]\n</td>\n</tr>\n</table>",
"_____no_output_____"
],
[
"### 5.2.3 Putting it together: Pooling backward \n\nYou now have everything you need to compute backward propagation on a pooling layer.\n\n**Exercise**: Implement the `pool_backward` function in both modes (`\"max\"` and `\"average\"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.",
"_____no_output_____"
]
],
[
[
"def pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = None\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n stride = None\n f = None\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n m, n_H_prev, n_W_prev, n_C_prev = None\n m, n_H, n_W, n_C = None\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = None\n \n for i in range(None): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = None\n \n for h in range(None): # loop on the vertical axis\n for w in range(None): # loop on the horizontal axis\n for c in range(None): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Compute the backward propagation in both modes.\n if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = None\n # Create the mask from a_prev_slice (≈1 line)\n mask = None\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n elif mode == \"average\":\n \n # Get the value a from dA (≈1 line)\n da = None\n # Define the shape of the filter as fxf (≈1 line)\n shape = None\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n ### END CODE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \nprint()\ndA_prev = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) ",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\nmode = max:\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0. 0. ] <br>\n [ 5.05844394 -1.68282702] <br>\n [ 0. 0. ]]\n</td>\n</tr>\n</table>\n\nmode = average\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0.08485462 0.2787552 ] <br>\n [ 1.26461098 -0.25749373] <br>\n [ 1.17975636 -0.53624893]]\n</td>\n</tr>\n</table>",
"_____no_output_____"
],
[
"### Congratulations !\n\nCongratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74f433b7955906c57db6f128ede1ffdd8eaf27d | 5,588 | ipynb | Jupyter Notebook | doc/user_guide.ipynb | ShekiLyu/lixinger-openapi | 6194e371de4aeb963594642126ea8f306c3a170f | [
"Apache-2.0"
] | 44 | 2018-05-23T07:04:32.000Z | 2022-01-16T13:28:09.000Z | doc/user_guide.ipynb | ShekiLyu/lixinger-openapi | 6194e371de4aeb963594642126ea8f306c3a170f | [
"Apache-2.0"
] | 14 | 2018-05-21T13:56:20.000Z | 2021-01-10T12:46:09.000Z | doc/user_guide.ipynb | ShekiLyu/lixinger-openapi | 6194e371de4aeb963594642126ea8f306c3a170f | [
"Apache-2.0"
] | 13 | 2018-10-31T08:59:11.000Z | 2022-02-23T01:59:07.000Z | 22.623482 | 273 | 0.454545 | [
[
[
"# 用户使用指南\n\n## 安装\n### 从PyPI安装\n`pip install lixinger-openapi`\n\n### 从Github安装\n`pip install git+http://github.com/ShekiLyu/lixinger-openapi.git`\n\n### 从PyPI更新\n`pip install --upgrade lixinger-openapi`\n\n### 从Github更新\n`pip install --upgrade git+http://github.com/ShekiLyu/lixinger-openapi.git`\n\n## 接口列表\n接口名 | 接口功能\n------------------- | -------------------------\nset\\_token | 设置token\nquery\\_json | 查询数据(json格式)\nquery\\_dataframe | 查询数据(dataframe格式)\n\n\n## 使用方法\n\n### 引入包",
"_____no_output_____"
]
],
[
[
"import lixinger_openapi as lo",
"_____no_output_____"
]
],
[
[
"### 加载token",
"_____no_output_____"
]
],
[
[
"lo.set_token(\"your_token\")",
"_____no_output_____"
]
],
[
[
"set_token会目录下生成token.cfg文件保存token,所以在当前目录只需加载一次。如果不想写token.cfg文件,可以如下设置:",
"_____no_output_____"
]
],
[
[
"lo.set_token(\"your_token\", write_token=False)",
"_____no_output_____"
]
],
[
[
"### 查询(使用理杏仁开放平台上的示例)\n#### A股公司基本面数据\n##### json格式",
"_____no_output_____"
]
],
[
[
"json_rlt = lo.query_json('a.stock.fundamental.non_financial', \n {\n \"date\": \"2018-01-19\",\n \"stockCodes\": [\n \"000028\",\n \"600511\"\n ],\n \"metricsList\": [\n \"pe_ttm\",\n \"mc\"\n ]\n })\nprint(json_rlt)",
"{'data': [{'date': '2018-01-19T00:00:00+08:00', 'pe_ttm': 21.046568599508507, 'stockCode': '000028', 'mc': 26663748314.4}, {'date': '2018-01-19T00:00:00+08:00', 'pe_ttm': 21.459988206744743, 'stockCode': '600511', 'mc': 20346751061}], 'code': 0, 'msg': 'success'}\n"
]
],
[
[
"##### dataframe格式",
"_____no_output_____"
]
],
[
[
"dataframe_rlt = lo.query_dataframe('a.stock.fundamental.non_financial', \n {\n \"date\": \"2018-01-19\",\n \"metricsList\": [\"pe_ttm\", \"mc\"],\n \"stockCodes\": [\"000028\", \"600511\"]\n })\nprint('code: '+ str(dataframe_rlt['code']))\nprint('\\ndata:')\nprint(dataframe_rlt['data'])\nprint('\\nmsg: ' + dataframe_rlt['msg'])",
"code: 0\n\ndata:\n date mc pe_ttm stockCode\n0 2018-01-19T00:00:00+08:00 2.666375e+10 21.046569 000028\n1 2018-01-19T00:00:00+08:00 2.034675e+10 21.459988 600511\n\nmsg: success\n"
]
],
[
[
"#### A股指数基本信息\n##### json格式",
"_____no_output_____"
]
],
[
[
"json_rlt = lo.query_json('a.index', {\n \"stockCodes\": [\n \"000016\"\n ]\n})\nprint(json_rlt)",
"{'data': [{'source': 'sh', 'cnName': '上证50', 'publishDate': '2004-01-01T16:00:00.000Z', 'stockCode': '000016', 'areaCode': 'cn', 'market': 'a'}], 'code': 0, 'msg': 'success'}\n"
]
],
[
[
"##### dataframe格式",
"_____no_output_____"
]
],
[
[
"dataframe_rlt = lo.query_dataframe('a.index', {\n \"stockCodes\": [\n \"000016\"\n ]\n})\nprint('code: '+ str(dataframe_rlt['code']))\nprint('\\ndata:')\nprint(dataframe_rlt['data'])\nprint('\\nmsg: ' + dataframe_rlt['msg'])",
"code: 0\n\ndata:\n areaCode cnName market publishDate source stockCode\n0 cn 上证50 a 2004-01-01T16:00:00.000Z sh 000016\n\nmsg: success\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74f671b3f5af8a595ce03d0673c5e737c3af29e | 12,048 | ipynb | Jupyter Notebook | PETA_report_template__Python__Product_statistics.ipynb | JaylanLiu/PETA_report_template__Python__Product_statistics | 3401b8d4ed8320a3ffbfc41be4e77b619121f298 | [
"MIT"
] | null | null | null | PETA_report_template__Python__Product_statistics.ipynb | JaylanLiu/PETA_report_template__Python__Product_statistics | 3401b8d4ed8320a3ffbfc41be4e77b619121f298 | [
"MIT"
] | null | null | null | PETA_report_template__Python__Product_statistics.ipynb | JaylanLiu/PETA_report_template__Python__Product_statistics | 3401b8d4ed8320a3ffbfc41be4e77b619121f298 | [
"MIT"
] | 1 | 2021-11-16T07:14:26.000Z | 2021-11-16T07:14:26.000Z | 28.149533 | 496 | 0.53071 | [
[
[
"token='c5b8bd2c-0f8b-4edd-b43d-65245f'\njson_str='{\"studyIds\":[\"tianhua_system_DX1901\",\"tianhua_system_DX1902\",\"tianhua_system_DX1947\"],\"pageSize\":999999,\"pageIndex\":1,\"attributesRangeFilters\":[],\"attributesEqualFilters\":[],\"attributesDateFilters\":[],\"mutationFilter\":{\"hugoGeneSymbols\":[],\"mutationType\":[],\"variantSource\":[\"Somatic\"],\"hasDrug\":[],\"clinsig\":[],\"snvFilter\":{\"exacStart\":\"\",\"exadEnd\":\"\",\"vabundStart\":\"\",\"vabundEnd\":\"\",\"variantClass\":[],\"searchStr\":\"\"}}}'\nhost='https://peta.bgi.com/api'",
"_____no_output_____"
],
[
"import pandas as pd\nfrom pypeta import Peta\nimport pypeta\nimport plotly.express as px\nimport json\nimport numpy as np\nfrom IPython.display import display\nfrom contextlib import redirect_stderr\nimport io\nfrom pyCDxAnalysis import CDx_Data\nfrom datetime import datetime\n\npd.set_option('precision', 2)",
"_____no_output_____"
],
[
"# fetch data\ntry:\n cdx = CDx_Data()\n filter_description = cdx.from_PETA(json_str=json_str,\n token=token,\n host=host)\nexcept:\n print('Failed to fetch data.')",
"_____no_output_____"
]
],
[
[
"# 肿瘤个体化诊疗基因检测统计",
"_____no_output_____"
],
[
"## 样本选取条件",
"_____no_output_____"
]
],
[
[
"try:\n print(cdx.filter_description())\n print(f'样本总量为{cdx.sample_size()}例。')\nexcept Exception as e:\n print(e)",
"_____no_output_____"
]
],
[
[
"## 送检样本量",
"_____no_output_____"
]
],
[
[
"try:\n scdx=cdx\n chosen_cancer_types='肾癌/胃肠道间质瘤/胰腺癌/食管癌/神经内分泌肿瘤/非小细胞肺癌/甲状腺癌/睾丸癌/胃癌/卵巢癌/膀胱癌/恶性胸膜间皮瘤/乳腺癌/阴茎癌/黑色素瘤/胆管癌/胆囊癌/肝细胞癌/软组织肉瘤/胸腺癌/胸腺瘤/骨癌/子宫肿瘤/结直肠癌/中枢神经系统肿瘤/非黑色素瘤皮肤癌/小细胞肺癌/宫颈癌/头颈癌/前列腺癌/外阴癌/肛门癌/小肠腺癌/默克尔细胞癌/不分癌种'.split('/')\n\n value_counts=scdx.sample_size('CANCER_TYPE').reindex(chosen_cancer_types).fillna(0).sort_values(ascending=False)\n\n fig= px.pie(values=value_counts,names=value_counts.index)\n fig.update_traces( texttemplate = \"%{label}: %{value} <br>%{percent:.2%}\")\n #fig.update_layout(showlegend=False)\n fig.update_traces(textposition='inside')\n fig.update_layout(\n uniformtext_minsize=10, # 文本信息最小值\n uniformtext_mode='hide')\n fig.show()\nexcept:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
]
],
[
[
"## 药物检测阳性率",
"_____no_output_____"
]
],
[
[
"#判断数据集是否支持药物阳性率的统计\ntry:\n scdx=cdx\n support_for_drug_sensitivity=False\n if 'GENETIC_TEST_RESULT' in scdx.cli.columns:\n support_for_drug_sensitivity=True\n\n\n if support_for_drug_sensitivity:\n pr=pypeta.positive_rate(scdx.cli.GENETIC_TEST_RESULT,['阳性'])\n\n print(f'总例数为{pr[0]},其中有效{pr[1]}例,阳性率为{pr[2]:8.2%}。')\n else:\n print(\"数据不支持该选项。\")\nexcept:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
],
[
"f = io.StringIO()\nwith redirect_stderr(f):\n cli=scdx.cli.copy()\n try:\n x=cli['GENETIC_TEST_RESULT'].groupby(cli['CANCER_TYPE']).apply(lambda x: pypeta.positive_rate(x,['阳性'])[2]).sort_values(ascending=False)\n\n xdf=pd.DataFrame(x).reset_index()\n\n\n print('各个癌种的药物阳性率为:')\n fig = px.bar(xdf, x='CANCER_TYPE', y='GENETIC_TEST_RESULT',text='GENETIC_TEST_RESULT',labels={\n 'CANCER_TYPE':'癌种',\n 'GENETIC_TEST_RESULT':'药物检出阳性率',\n })\n #fig.update_traces(texttemplate='%{text:%.2f%%}', textposition='outside',)\n fig.update_traces(texttemplate='%{text:.2%}', textposition='outside',)\n fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')\n fig.update_layout()\n fig.show()\n except:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
]
],
[
[
"## 基因检出率 ",
"_____no_output_____"
]
],
[
[
"try:\n ser=cdx.test_positive_rate(groupby_genes=True)\n mut_freq_per_gene_df=ser.sort_values(ascending=False).reset_index()\n\n mut_freq_per_gene_df.columns=pd.Index(['基因','频率'])\n print('各基因的检出率为:')\n fig = px.bar(mut_freq_per_gene_df, x='基因', y='频率',text='频率')\n #fig.update_traces(texttemplate='%{text:%.2f%%}', textposition='outside',)\n fig.update_traces(texttemplate='%{text:.2%}', textposition='outside',)\n fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')\n fig.show()\nexcept:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
]
],
[
[
"## 基因突变类型检出率",
"_____no_output_____"
]
],
[
[
"try:\n mut_freq_per_gene_df=cdx.test_positive_rate(groupby_variant_type=True).reset_index()\n\n mut_freq_per_gene_df.columns=pd.Index(['类型','频率'])\n print('各类型的检出率为:')\n fig = px.bar(mut_freq_per_gene_df, x='类型', y='频率',text='频率')\n #fig.update_traces(texttemplate='%{text:%.2f%%}', textposition='outside',)\n fig.update_traces(texttemplate='%{text:.2%}', textposition='outside',)\n fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')\n fig.show()\nexcept:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
]
],
[
[
"## TMB分布 ",
"_____no_output_____"
]
],
[
[
"try:\n cdx_tmb=cdx\n\n chosen_cancer_types='肾癌/胃肠道间质瘤/胰腺癌/食管癌/神经内分泌肿瘤/非小细胞肺癌/甲状腺癌/睾丸癌/胃癌/卵巢癌/膀胱癌/恶性胸膜间皮瘤/乳腺癌/阴茎癌/黑色素瘤/胆管癌/胆囊癌/肝细胞癌/软组织肉瘤/胸腺癌/胸腺瘤/骨癌/子宫肿瘤/结直肠癌/中枢神经系统肿瘤/非黑色素瘤皮肤癌/小细胞肺癌/宫颈癌/头颈癌/前列腺癌/外阴癌/肛门癌/小肠腺癌/默克尔细胞癌/不分癌种'.split('/')\n\n cli=cdx_tmb.cli[cdx_tmb.cli.TMB.map(lambda x: pypeta.is_float(x))].copy()\n\n cli=cli[cli.CANCER_TYPE.isin(chosen_cancer_types)].copy()\n\n cli.TMB=cli.TMB.astype('float')\n\n display(cli.TMB.groupby(cli.CANCER_TYPE).describe())\n \n # fig1\n fig=px.histogram( x=cli.TMB,labels={'x':'TMB','y':'百分比'},histnorm='probability density',marginal=\"rug\",)\n fig.show()\n \n fig=px.violin(cli,x='CANCER_TYPE', y=\"TMB\", box=True, # draw box plot inside the violin\n points='all', # can be 'outliers', or False\n )\n fig.show()\n \nexcept Exception as e:\n print(e)",
"_____no_output_____"
]
],
[
[
"## MSI分布",
"_____no_output_____"
]
],
[
[
"try: \n cdx_tmb=cdx\n\n chosen_cancer_types='肾癌/胃肠道间质瘤/胰腺癌/食管癌/神经内分泌肿瘤/非小细胞肺癌/甲状腺癌/睾丸癌/胃癌/卵巢癌/膀胱癌/恶性胸膜间皮瘤/乳腺癌/阴茎癌/黑色素瘤/胆管癌/胆囊癌/肝细胞癌/软组织肉瘤/胸腺癌/胸腺瘤/骨癌/子宫肿瘤/结直肠癌/中枢神经系统肿瘤/非黑色素瘤皮肤癌/小细胞肺癌/宫颈癌/头颈癌/前列腺癌/外阴癌/肛门癌/小肠腺癌/默克尔细胞癌/不分癌种'.split('/')\n\n cli=cli[cli.CANCER_TYPE.isin(chosen_cancer_types)].copy()\n\n\n \n\n cli=cli.dropna(subset=['MSI_STATUS'])\n cli=cli[cli.MSI_STATUS.isin(['MSI-L','MSI-H','MSS'])]\n\n display(cli.MSI_STATUS.groupby(cli.CANCER_TYPE).value_counts().unstack(level=1).fillna(0))\n\n vc=cli.MSI_STATUS.value_counts()\n tdf=vc.reindex(set(vc.index).union(set(['MSI-L','MSI-H','MSS']))).fillna(0).reset_index()\n fig= px.pie(tdf, values='MSI_STATUS', names='index')\n fig.update_traces( texttemplate = \"%{label}: %{value} <br>%{percent:.2%}\")\n fig.update_layout(showlegend=False)\n fig.show()\n\n tmp=cli.MSI_STATUS.groupby(cli.CANCER_TYPE).value_counts().reset_index('CANCER_TYPE')\n\n tmp=tmp.rename(columns={'MSI_STATUS':'COUNT'})\n\n tmp=tmp.reset_index()\n\n print('分癌种的MSI分布如下:')\n fig=px.sunburst(tmp,path=['CANCER_TYPE','MSI_STATUS'],values='COUNT')\n fig.update_traces( texttemplate = \"%{label}: %{value} \")\n #fig.update_layout(showlegend=False)\n fig.show()\n\n msi_tmp_df=cli[['CANCER_TYPE','MSI_STATUS']]\n msi_tmp_df.columns=pd.Index(['癌症类型','MSI状态'])\n\n fig = px.parallel_categories(msi_tmp_df,)\n fig.show()\nexcept Exception as e:\n print('Data selected don`t support this calculation.')\n print(e)",
"_____no_output_____"
]
],
[
[
"## 基因融合详情",
"_____no_output_____"
]
],
[
[
"try:\n pd.set_option('display.max_rows', None)\n \n display(cdx.sv)\nexcept:\n print('Data selected don`t support this calculation.')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74f7e005105543b7598d7ae49a185f6816e28e3 | 2,886 | ipynb | Jupyter Notebook | PyPoll/Resources/main.ipynb | gianx1/Python-Challenge | 0ad721c23876313ce1394d52a23e2317b1d13c51 | [
"ADSL"
] | null | null | null | PyPoll/Resources/main.ipynb | gianx1/Python-Challenge | 0ad721c23876313ce1394d52a23e2317b1d13c51 | [
"ADSL"
] | null | null | null | PyPoll/Resources/main.ipynb | gianx1/Python-Challenge | 0ad721c23876313ce1394d52a23e2317b1d13c51 | [
"ADSL"
] | null | null | null | 28.86 | 153 | 0.477477 | [
[
[
"#import os module and csv file \nimport os\nimport csv\n\n\nelection_data_csv_path = os.path.join(\"election_data.csv\")\n\ncandidates = []\npercent_of_candidates = []\ntotal_num_votes_per_candidate = []\n\n\nwith open(election_data_csv_path) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n first_row = next(csvreader)\n \n \n # get total number of votes *which is just the total number of rows*\n for row in csvreader:\n \n # list of candidates who received votes\n \n if row[2] not in candidates:\n candidates.append(row[2])\n total_num_votes_per_candidate.append(1)\n else:\n cand = candidates.index(row[2])\n total_num_votes_per_candidate[cand] = total_num_votes_per_candidate[cand] + 1\n \n total_votes = sum(total_num_votes_per_candidate)\n \n \n max_votes = max(total_num_votes_per_candidate)\n\n index_winner = total_num_votes_per_candidate.index(max_votes)\n\n winner = candidates[index_winner]\n \n#results\nprint(\"Election Results\")\nprint(\"-------------------------\")\nprint(\"Total Votes: \" + str(total_votes))\nprint(\"-------------------------\") \nfor i in range(len(candidates)):\n print(f\"{candidates[i]}: {str(round(total_num_votes_per_candidate[i]/total_votes * 100, 4))} % ({total_num_votes_per_candidate[i]})\") \nprint(\"-------------------------\")\nprint(\"Winner: \" + str(winner))\nprint(\"-------------------------\") ",
"Election Results\n-------------------------\nTotal Votes: 3521001\n-------------------------\nKhan: 63.0 % (2218231)\nCorrey: 20.0 % (704200)\nLi: 14.0 % (492940)\nO'Tooley: 3.0 % (105630)\n-------------------------\nWinner: Khan\n-------------------------\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e74f80e3ee471d43b19a73f4b1b1be0e8f27e8a2 | 196,675 | ipynb | Jupyter Notebook | SVM_with_classic_feature_extraction.ipynb | rishabhrshanbhag/Face-Detection-Employee | 10f13acab60223da83b60cb35e7a93401c17f664 | [
"MIT"
] | null | null | null | SVM_with_classic_feature_extraction.ipynb | rishabhrshanbhag/Face-Detection-Employee | 10f13acab60223da83b60cb35e7a93401c17f664 | [
"MIT"
] | null | null | null | SVM_with_classic_feature_extraction.ipynb | rishabhrshanbhag/Face-Detection-Employee | 10f13acab60223da83b60cb35e7a93401c17f664 | [
"MIT"
] | null | null | null | 112.129418 | 81,264 | 0.801373 | [
[
[
"import cv2\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nfrom imutils import face_utils\nfont = cv2.FONT_HERSHEY_SIMPLEX",
"_____no_output_____"
],
[
"import os\n\ndef get_files(path):\n return os.listdir(path)\n\ncascPath = \"/Users/abdulrehman/opt/anaconda3/envs/Face-Detection/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_default.xml\"\n\ndef return_bbx(image):\n faceCascade = cv2.CascadeClassifier(cascPath)\n faces = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)\n return faces",
"_____no_output_____"
],
[
"get_files('/Users/abdulrehman/Desktop/SML Project/FacesInTheWild/lfw-deepfunneled')",
"_____no_output_____"
],
[
"Dataset_path = '/Users/abdulrehman/Desktop/SML Project/FacesInTheWild/'\n\nCelebs = pd.read_csv(Dataset_path+'lfw_allnames.csv')\nCelebs = Celebs[Celebs['images']>50]\nCelebs",
"_____no_output_____"
],
[
"for _,[name,images] in Celebs.iterrows():\n print(name)\n print(get_files(Dataset_path+'lfw-deepfunneled/'+name))\n print('\\n\\n')",
"Ariel_Sharon\n['Ariel_Sharon_0050.jpg', 'Ariel_Sharon_0044.jpg', 'Ariel_Sharon_0045.jpg', 'Ariel_Sharon_0051.jpg', 'Ariel_Sharon_0047.jpg', 'Ariel_Sharon_0053.jpg', 'Ariel_Sharon_0052.jpg', 'Ariel_Sharon_0046.jpg', 'Ariel_Sharon_0042.jpg', 'Ariel_Sharon_0056.jpg', 'Ariel_Sharon_0057.jpg', 'Ariel_Sharon_0043.jpg', 'Ariel_Sharon_0055.jpg', 'Ariel_Sharon_0041.jpg', 'Ariel_Sharon_0069.jpg', 'Ariel_Sharon_0068.jpg', 'Ariel_Sharon_0040.jpg', 'Ariel_Sharon_0054.jpg', 'Ariel_Sharon_0033.jpg', 'Ariel_Sharon_0027.jpg', 'Ariel_Sharon_0026.jpg', 'Ariel_Sharon_0032.jpg', 'Ariel_Sharon_0018.jpg', 'Ariel_Sharon_0024.jpg', 'Ariel_Sharon_0030.jpg', 'Ariel_Sharon_0031.jpg', 'Ariel_Sharon_0025.jpg', 'Ariel_Sharon_0019.jpg', 'Ariel_Sharon_0021.jpg', 'Ariel_Sharon_0035.jpg', 'Ariel_Sharon_0009.jpg', 'Ariel_Sharon_0008.jpg', 'Ariel_Sharon_0034.jpg', 'Ariel_Sharon_0020.jpg', 'Ariel_Sharon_0036.jpg', 'Ariel_Sharon_0022.jpg', 'Ariel_Sharon_0023.jpg', 'Ariel_Sharon_0037.jpg', 'Ariel_Sharon_0012.jpg', 'Ariel_Sharon_0006.jpg', 'Ariel_Sharon_0007.jpg', 'Ariel_Sharon_0013.jpg', 'Ariel_Sharon_0039.jpg', 'Ariel_Sharon_0005.jpg', 'Ariel_Sharon_0011.jpg', 'Ariel_Sharon_0010.jpg', 'Ariel_Sharon_0004.jpg', 'Ariel_Sharon_0038.jpg', 'Ariel_Sharon_0014.jpg', 'Ariel_Sharon_0028.jpg', 'Ariel_Sharon_0029.jpg', 'Ariel_Sharon_0015.jpg', 'Ariel_Sharon_0001.jpg', 'Ariel_Sharon_0017.jpg', 'Ariel_Sharon_0003.jpg', 'Ariel_Sharon_0002.jpg', 'Ariel_Sharon_0016.jpg', 'Ariel_Sharon_0059.jpg', 'Ariel_Sharon_0071.jpg', 'Ariel_Sharon_0065.jpg', 'Ariel_Sharon_0064.jpg', 'Ariel_Sharon_0070.jpg', 'Ariel_Sharon_0058.jpg', 'Ariel_Sharon_0066.jpg', 'Ariel_Sharon_0072.jpg', 'Ariel_Sharon_0073.jpg', 'Ariel_Sharon_0067.jpg', 'Ariel_Sharon_0063.jpg', 'Ariel_Sharon_0077.jpg', 'Ariel_Sharon_0076.jpg', 'Ariel_Sharon_0062.jpg', 'Ariel_Sharon_0074.jpg', 'Ariel_Sharon_0060.jpg', 'Ariel_Sharon_0048.jpg', 'Ariel_Sharon_0049.jpg', 'Ariel_Sharon_0061.jpg', 'Ariel_Sharon_0075.jpg']\n\n\n\nColin_Powell\n['Colin_Powell_0195.jpg', 'Colin_Powell_0181.jpg', 'Colin_Powell_0156.jpg', 'Colin_Powell_0142.jpg', 'Colin_Powell_0022.jpg', 'Colin_Powell_0036.jpg', 'Colin_Powell_0208.jpg', 'Colin_Powell_0220.jpg', 'Colin_Powell_0234.jpg', 'Colin_Powell_0235.jpg', 'Colin_Powell_0221.jpg', 'Colin_Powell_0209.jpg', 'Colin_Powell_0037.jpg', 'Colin_Powell_0023.jpg', 'Colin_Powell_0143.jpg', 'Colin_Powell_0157.jpg', 'Colin_Powell_0180.jpg', 'Colin_Powell_0194.jpg', 'Colin_Powell_0182.jpg', 'Colin_Powell_0196.jpg', 'Colin_Powell_0169.jpg', 'Colin_Powell_0141.jpg', 'Colin_Powell_0155.jpg', 'Colin_Powell_0009.jpg', 'Colin_Powell_0035.jpg', 'Colin_Powell_0021.jpg', 'Colin_Powell_0223.jpg', 'Colin_Powell_0222.jpg', 'Colin_Powell_0236.jpg', 'Colin_Powell_0020.jpg', 'Colin_Powell_0034.jpg', 'Colin_Powell_0008.jpg', 'Colin_Powell_0154.jpg', 'Colin_Powell_0140.jpg', 'Colin_Powell_0168.jpg', 'Colin_Powell_0197.jpg', 'Colin_Powell_0183.jpg', 'Colin_Powell_0187.jpg', 'Colin_Powell_0193.jpg', 'Colin_Powell_0144.jpg', 'Colin_Powell_0150.jpg', 'Colin_Powell_0178.jpg', 'Colin_Powell_0030.jpg', 'Colin_Powell_0024.jpg', 'Colin_Powell_0018.jpg', 'Colin_Powell_0232.jpg', 'Colin_Powell_0226.jpg', 'Colin_Powell_0227.jpg', 'Colin_Powell_0233.jpg', 'Colin_Powell_0019.jpg', 'Colin_Powell_0025.jpg', 'Colin_Powell_0031.jpg', 'Colin_Powell_0179.jpg', 'Colin_Powell_0151.jpg', 'Colin_Powell_0145.jpg', 'Colin_Powell_0192.jpg', 'Colin_Powell_0186.jpg', 'Colin_Powell_0190.jpg', 'Colin_Powell_0184.jpg', 'Colin_Powell_0153.jpg', 'Colin_Powell_0147.jpg', 'Colin_Powell_0027.jpg', 'Colin_Powell_0033.jpg', 
'Colin_Powell_0225.jpg', 'Colin_Powell_0231.jpg', 'Colin_Powell_0219.jpg', 'Colin_Powell_0218.jpg', 'Colin_Powell_0230.jpg', 'Colin_Powell_0224.jpg', 'Colin_Powell_0032.jpg', 'Colin_Powell_0026.jpg', 'Colin_Powell_0146.jpg', 'Colin_Powell_0152.jpg', 'Colin_Powell_0185.jpg', 'Colin_Powell_0191.jpg', 'Colin_Powell_0109.jpg', 'Colin_Powell_0135.jpg', 'Colin_Powell_0121.jpg', 'Colin_Powell_0082.jpg', 'Colin_Powell_0096.jpg', 'Colin_Powell_0069.jpg', 'Colin_Powell_0041.jpg', 'Colin_Powell_0055.jpg', 'Colin_Powell_0054.jpg', 'Colin_Powell_0040.jpg', 'Colin_Powell_0068.jpg', 'Colin_Powell_0097.jpg', 'Colin_Powell_0083.jpg', 'Colin_Powell_0120.jpg', 'Colin_Powell_0134.jpg', 'Colin_Powell_0108.jpg', 'Colin_Powell_0122.jpg', 'Colin_Powell_0136.jpg', 'Colin_Powell_0095.jpg', 'Colin_Powell_0081.jpg', 'Colin_Powell_0056.jpg', 'Colin_Powell_0042.jpg', 'Colin_Powell_0043.jpg', 'Colin_Powell_0057.jpg', 'Colin_Powell_0080.jpg', 'Colin_Powell_0094.jpg', 'Colin_Powell_0137.jpg', 'Colin_Powell_0123.jpg', 'Colin_Powell_0127.jpg', 'Colin_Powell_0133.jpg', 'Colin_Powell_0090.jpg', 'Colin_Powell_0084.jpg', 'Colin_Powell_0053.jpg', 'Colin_Powell_0047.jpg', 'Colin_Powell_0046.jpg', 'Colin_Powell_0052.jpg', 'Colin_Powell_0085.jpg', 'Colin_Powell_0091.jpg', 'Colin_Powell_0132.jpg', 'Colin_Powell_0126.jpg', 'Colin_Powell_0130.jpg', 'Colin_Powell_0124.jpg', 'Colin_Powell_0118.jpg', 'Colin_Powell_0087.jpg', 'Colin_Powell_0093.jpg', 'Colin_Powell_0044.jpg', 'Colin_Powell_0050.jpg', 'Colin_Powell_0078.jpg', 'Colin_Powell_0079.jpg', 'Colin_Powell_0051.jpg', 'Colin_Powell_0045.jpg', 'Colin_Powell_0092.jpg', 'Colin_Powell_0086.jpg', 'Colin_Powell_0119.jpg', 'Colin_Powell_0125.jpg', 'Colin_Powell_0131.jpg', 'Colin_Powell_0128.jpg', 'Colin_Powell_0114.jpg', 'Colin_Powell_0100.jpg', 'Colin_Powell_0048.jpg', 'Colin_Powell_0060.jpg', 'Colin_Powell_0074.jpg', 'Colin_Powell_0075.jpg', 'Colin_Powell_0061.jpg', 'Colin_Powell_0049.jpg', 'Colin_Powell_0101.jpg', 'Colin_Powell_0115.jpg', 'Colin_Powell_0129.jpg', 'Colin_Powell_0103.jpg', 'Colin_Powell_0117.jpg', 'Colin_Powell_0088.jpg', 'Colin_Powell_0077.jpg', 'Colin_Powell_0063.jpg', 'Colin_Powell_0062.jpg', 'Colin_Powell_0076.jpg', 'Colin_Powell_0089.jpg', 'Colin_Powell_0116.jpg', 'Colin_Powell_0102.jpg', 'Colin_Powell_0106.jpg', 'Colin_Powell_0112.jpg', 'Colin_Powell_0099.jpg', 'Colin_Powell_0072.jpg', 'Colin_Powell_0066.jpg', 'Colin_Powell_0067.jpg', 'Colin_Powell_0073.jpg', 'Colin_Powell_0098.jpg', 'Colin_Powell_0113.jpg', 'Colin_Powell_0107.jpg', 'Colin_Powell_0111.jpg', 'Colin_Powell_0105.jpg', 'Colin_Powell_0139.jpg', 'Colin_Powell_0065.jpg', 'Colin_Powell_0071.jpg', 'Colin_Powell_0059.jpg', 'Colin_Powell_0058.jpg', 'Colin_Powell_0070.jpg', 'Colin_Powell_0064.jpg', 'Colin_Powell_0138.jpg', 'Colin_Powell_0104.jpg', 'Colin_Powell_0110.jpg', 'Colin_Powell_0188.jpg', 'Colin_Powell_0177.jpg', 'Colin_Powell_0163.jpg', 'Colin_Powell_0003.jpg', 'Colin_Powell_0017.jpg', 'Colin_Powell_0229.jpg', 'Colin_Powell_0201.jpg', 'Colin_Powell_0215.jpg', 'Colin_Powell_0214.jpg', 'Colin_Powell_0200.jpg', 'Colin_Powell_0228.jpg', 'Colin_Powell_0016.jpg', 'Colin_Powell_0002.jpg', 'Colin_Powell_0162.jpg', 'Colin_Powell_0176.jpg', 'Colin_Powell_0189.jpg', 'Colin_Powell_0148.jpg', 'Colin_Powell_0160.jpg', 'Colin_Powell_0174.jpg', 'Colin_Powell_0028.jpg', 'Colin_Powell_0014.jpg', 'Colin_Powell_0216.jpg', 'Colin_Powell_0202.jpg', 'Colin_Powell_0203.jpg', 'Colin_Powell_0217.jpg', 'Colin_Powell_0001.jpg', 'Colin_Powell_0015.jpg', 'Colin_Powell_0029.jpg', 'Colin_Powell_0175.jpg', 'Colin_Powell_0161.jpg', 
'Colin_Powell_0149.jpg', 'Colin_Powell_0165.jpg', 'Colin_Powell_0171.jpg', 'Colin_Powell_0159.jpg', 'Colin_Powell_0011.jpg', 'Colin_Powell_0005.jpg', 'Colin_Powell_0039.jpg', 'Colin_Powell_0213.jpg', 'Colin_Powell_0207.jpg', 'Colin_Powell_0206.jpg', 'Colin_Powell_0212.jpg', 'Colin_Powell_0038.jpg', 'Colin_Powell_0004.jpg', 'Colin_Powell_0010.jpg', 'Colin_Powell_0158.jpg', 'Colin_Powell_0170.jpg', 'Colin_Powell_0164.jpg', 'Colin_Powell_0199.jpg', 'Colin_Powell_0172.jpg', 'Colin_Powell_0166.jpg', 'Colin_Powell_0006.jpg', 'Colin_Powell_0012.jpg', 'Colin_Powell_0204.jpg', 'Colin_Powell_0210.jpg', 'Colin_Powell_0211.jpg', 'Colin_Powell_0205.jpg', 'Colin_Powell_0013.jpg', 'Colin_Powell_0007.jpg', 'Colin_Powell_0167.jpg', 'Colin_Powell_0173.jpg', 'Colin_Powell_0198.jpg']\n\n\n\nDonald_Rumsfeld\n['Donald_Rumsfeld_0029.jpg', 'Donald_Rumsfeld_0015.jpg', 'Donald_Rumsfeld_0001.jpg', 'Donald_Rumsfeld_0014.jpg', 'Donald_Rumsfeld_0028.jpg', 'Donald_Rumsfeld_0002.jpg', 'Donald_Rumsfeld_0016.jpg', 'Donald_Rumsfeld_0017.jpg', 'Donald_Rumsfeld_0003.jpg', 'Donald_Rumsfeld_0007.jpg', 'Donald_Rumsfeld_0013.jpg', 'Donald_Rumsfeld_0012.jpg', 'Donald_Rumsfeld_0006.jpg', 'Donald_Rumsfeld_0010.jpg', 'Donald_Rumsfeld_0004.jpg', 'Donald_Rumsfeld_0038.jpg', 'Donald_Rumsfeld_0039.jpg', 'Donald_Rumsfeld_0005.jpg', 'Donald_Rumsfeld_0011.jpg', 'Donald_Rumsfeld_0089.jpg', 'Donald_Rumsfeld_0076.jpg', 'Donald_Rumsfeld_0062.jpg', 'Donald_Rumsfeld_0102.jpg', 'Donald_Rumsfeld_0116.jpg', 'Donald_Rumsfeld_0117.jpg', 'Donald_Rumsfeld_0103.jpg', 'Donald_Rumsfeld_0063.jpg', 'Donald_Rumsfeld_0077.jpg', 'Donald_Rumsfeld_0088.jpg', 'Donald_Rumsfeld_0049.jpg', 'Donald_Rumsfeld_0061.jpg', 'Donald_Rumsfeld_0075.jpg', 'Donald_Rumsfeld_0115.jpg', 'Donald_Rumsfeld_0101.jpg', 'Donald_Rumsfeld_0100.jpg', 'Donald_Rumsfeld_0114.jpg', 'Donald_Rumsfeld_0074.jpg', 'Donald_Rumsfeld_0060.jpg', 'Donald_Rumsfeld_0048.jpg', 'Donald_Rumsfeld_0064.jpg', 'Donald_Rumsfeld_0070.jpg', 'Donald_Rumsfeld_0058.jpg', 'Donald_Rumsfeld_0110.jpg', 'Donald_Rumsfeld_0104.jpg', 'Donald_Rumsfeld_0105.jpg', 'Donald_Rumsfeld_0111.jpg', 'Donald_Rumsfeld_0059.jpg', 'Donald_Rumsfeld_0071.jpg', 'Donald_Rumsfeld_0065.jpg', 'Donald_Rumsfeld_0098.jpg', 'Donald_Rumsfeld_0073.jpg', 'Donald_Rumsfeld_0067.jpg', 'Donald_Rumsfeld_0107.jpg', 'Donald_Rumsfeld_0113.jpg', 'Donald_Rumsfeld_0112.jpg', 'Donald_Rumsfeld_0106.jpg', 'Donald_Rumsfeld_0066.jpg', 'Donald_Rumsfeld_0072.jpg', 'Donald_Rumsfeld_0099.jpg', 'Donald_Rumsfeld_0094.jpg', 'Donald_Rumsfeld_0080.jpg', 'Donald_Rumsfeld_0057.jpg', 'Donald_Rumsfeld_0043.jpg', 'Donald_Rumsfeld_0042.jpg', 'Donald_Rumsfeld_0056.jpg', 'Donald_Rumsfeld_0081.jpg', 'Donald_Rumsfeld_0095.jpg', 'Donald_Rumsfeld_0083.jpg', 'Donald_Rumsfeld_0097.jpg', 'Donald_Rumsfeld_0068.jpg', 'Donald_Rumsfeld_0040.jpg', 'Donald_Rumsfeld_0054.jpg', 'Donald_Rumsfeld_0108.jpg', 'Donald_Rumsfeld_0120.jpg', 'Donald_Rumsfeld_0121.jpg', 'Donald_Rumsfeld_0109.jpg', 'Donald_Rumsfeld_0055.jpg', 'Donald_Rumsfeld_0041.jpg', 'Donald_Rumsfeld_0069.jpg', 'Donald_Rumsfeld_0096.jpg', 'Donald_Rumsfeld_0082.jpg', 'Donald_Rumsfeld_0086.jpg', 'Donald_Rumsfeld_0092.jpg', 'Donald_Rumsfeld_0045.jpg', 'Donald_Rumsfeld_0051.jpg', 'Donald_Rumsfeld_0079.jpg', 'Donald_Rumsfeld_0119.jpg', 'Donald_Rumsfeld_0118.jpg', 'Donald_Rumsfeld_0078.jpg', 'Donald_Rumsfeld_0050.jpg', 'Donald_Rumsfeld_0044.jpg', 'Donald_Rumsfeld_0093.jpg', 'Donald_Rumsfeld_0087.jpg', 'Donald_Rumsfeld_0091.jpg', 'Donald_Rumsfeld_0085.jpg', 'Donald_Rumsfeld_0052.jpg', 'Donald_Rumsfeld_0046.jpg', 'Donald_Rumsfeld_0047.jpg', 
'Donald_Rumsfeld_0053.jpg', 'Donald_Rumsfeld_0084.jpg', 'Donald_Rumsfeld_0090.jpg', 'Donald_Rumsfeld_0008.jpg', 'Donald_Rumsfeld_0034.jpg', 'Donald_Rumsfeld_0020.jpg', 'Donald_Rumsfeld_0021.jpg', 'Donald_Rumsfeld_0035.jpg', 'Donald_Rumsfeld_0009.jpg', 'Donald_Rumsfeld_0023.jpg', 'Donald_Rumsfeld_0037.jpg', 'Donald_Rumsfeld_0036.jpg', 'Donald_Rumsfeld_0022.jpg', 'Donald_Rumsfeld_0026.jpg', 'Donald_Rumsfeld_0032.jpg', 'Donald_Rumsfeld_0033.jpg', 'Donald_Rumsfeld_0027.jpg', 'Donald_Rumsfeld_0031.jpg', 'Donald_Rumsfeld_0025.jpg', 'Donald_Rumsfeld_0019.jpg', 'Donald_Rumsfeld_0018.jpg', 'Donald_Rumsfeld_0024.jpg', 'Donald_Rumsfeld_0030.jpg']\n\n\n\nGeorge_W_Bush\n['George_W_Bush_0387.jpg', 'George_W_Bush_0393.jpg', 'George_W_Bush_0378.jpg', 'George_W_Bush_0422.jpg', 'George_W_Bush_0344.jpg', 'George_W_Bush_0350.jpg', 'George_W_Bush_0436.jpg', 'George_W_Bush_0185.jpg', 'George_W_Bush_0191.jpg', 'George_W_Bush_0146.jpg', 'George_W_Bush_0152.jpg', 'George_W_Bush_0032.jpg', 'George_W_Bush_0026.jpg', 'George_W_Bush_0218.jpg', 'George_W_Bush_0230.jpg', 'George_W_Bush_0224.jpg', 'George_W_Bush_0225.jpg', 'George_W_Bush_0231.jpg', 'George_W_Bush_0219.jpg', 'George_W_Bush_0027.jpg', 'George_W_Bush_0033.jpg', 'George_W_Bush_0153.jpg', 'George_W_Bush_0147.jpg', 'George_W_Bush_0190.jpg', 'George_W_Bush_0184.jpg', 'George_W_Bush_0351.jpg', 'George_W_Bush_0437.jpg', 'George_W_Bush_0423.jpg', 'George_W_Bush_0345.jpg', 'George_W_Bush_0379.jpg', 'George_W_Bush_0392.jpg', 'George_W_Bush_0386.jpg', 'George_W_Bush_0390.jpg', 'George_W_Bush_0384.jpg', 'George_W_Bush_0409.jpg', 'George_W_Bush_0435.jpg', 'George_W_Bush_0353.jpg', 'George_W_Bush_0347.jpg', 'George_W_Bush_0421.jpg', 'George_W_Bush_0192.jpg', 'George_W_Bush_0186.jpg', 'George_W_Bush_0179.jpg', 'George_W_Bush_0151.jpg', 'George_W_Bush_0145.jpg', 'George_W_Bush_0019.jpg', 'George_W_Bush_0025.jpg', 'George_W_Bush_0031.jpg', 'George_W_Bush_0227.jpg', 'George_W_Bush_0233.jpg', 'George_W_Bush_0232.jpg', 'George_W_Bush_0226.jpg', 'George_W_Bush_0030.jpg', 'George_W_Bush_0024.jpg', 'George_W_Bush_0018.jpg', 'George_W_Bush_0144.jpg', 'George_W_Bush_0150.jpg', 'George_W_Bush_0178.jpg', 'George_W_Bush_0187.jpg', 'George_W_Bush_0193.jpg', 'George_W_Bush_0346.jpg', 'George_W_Bush_0420.jpg', 'George_W_Bush_0434.jpg', 'George_W_Bush_0352.jpg', 'George_W_Bush_0408.jpg', 'George_W_Bush_0385.jpg', 'George_W_Bush_0391.jpg', 'George_W_Bush_0395.jpg', 'George_W_Bush_0381.jpg', 'George_W_Bush_0356.jpg', 'George_W_Bush_0430.jpg', 'George_W_Bush_0424.jpg', 'George_W_Bush_0342.jpg', 'George_W_Bush_0418.jpg', 'George_W_Bush_0197.jpg', 'George_W_Bush_0183.jpg', 'George_W_Bush_0154.jpg', 'George_W_Bush_0140.jpg', 'George_W_Bush_0168.jpg', 'George_W_Bush_0020.jpg', 'George_W_Bush_0034.jpg', 'George_W_Bush_0008.jpg', 'George_W_Bush_0222.jpg', 'George_W_Bush_0236.jpg', 'George_W_Bush_0237.jpg', 'George_W_Bush_0223.jpg', 'George_W_Bush_0009.jpg', 'George_W_Bush_0035.jpg', 'George_W_Bush_0021.jpg', 'George_W_Bush_0169.jpg', 'George_W_Bush_0141.jpg', 'George_W_Bush_0155.jpg', 'George_W_Bush_0182.jpg', 'George_W_Bush_0196.jpg', 'George_W_Bush_0419.jpg', 'George_W_Bush_0425.jpg', 'George_W_Bush_0343.jpg', 'George_W_Bush_0357.jpg', 'George_W_Bush_0431.jpg', 'George_W_Bush_0380.jpg', 'George_W_Bush_0394.jpg', 'George_W_Bush_0382.jpg', 'George_W_Bush_0396.jpg', 'George_W_Bush_0341.jpg', 'George_W_Bush_0427.jpg', 'George_W_Bush_0433.jpg', 'George_W_Bush_0355.jpg', 'George_W_Bush_0369.jpg', 'George_W_Bush_0180.jpg', 'George_W_Bush_0194.jpg', 'George_W_Bush_0143.jpg', 'George_W_Bush_0157.jpg', 
'George_W_Bush_0037.jpg', 'George_W_Bush_0023.jpg', 'George_W_Bush_0235.jpg', 'George_W_Bush_0221.jpg', 'George_W_Bush_0209.jpg', 'George_W_Bush_0208.jpg', 'George_W_Bush_0220.jpg', 'George_W_Bush_0234.jpg', 'George_W_Bush_0022.jpg', 'George_W_Bush_0036.jpg', 'George_W_Bush_0156.jpg', 'George_W_Bush_0142.jpg', 'George_W_Bush_0195.jpg', 'George_W_Bush_0181.jpg', 'George_W_Bush_0368.jpg', 'George_W_Bush_0432.jpg', 'George_W_Bush_0354.jpg', 'George_W_Bush_0340.jpg', 'George_W_Bush_0426.jpg', 'George_W_Bush_0397.jpg', 'George_W_Bush_0383.jpg', 'George_W_Bush_0482.jpg', 'George_W_Bush_0496.jpg', 'George_W_Bush_0469.jpg', 'George_W_Bush_0327.jpg', 'George_W_Bush_0441.jpg', 'George_W_Bush_0455.jpg', 'George_W_Bush_0333.jpg', 'George_W_Bush_0119.jpg', 'George_W_Bush_0125.jpg', 'George_W_Bush_0131.jpg', 'George_W_Bush_0092.jpg', 'George_W_Bush_0086.jpg', 'George_W_Bush_0079.jpg', 'George_W_Bush_0051.jpg', 'George_W_Bush_0045.jpg', 'George_W_Bush_0290.jpg', 'George_W_Bush_0284.jpg', 'George_W_Bush_0509.jpg', 'George_W_Bush_0253.jpg', 'George_W_Bush_0521.jpg', 'George_W_Bush_0247.jpg', 'George_W_Bush_0520.jpg', 'George_W_Bush_0246.jpg', 'George_W_Bush_0252.jpg', 'George_W_Bush_0508.jpg', 'George_W_Bush_0285.jpg', 'George_W_Bush_0291.jpg', 'George_W_Bush_0044.jpg', 'George_W_Bush_0050.jpg', 'George_W_Bush_0078.jpg', 'George_W_Bush_0087.jpg', 'George_W_Bush_0093.jpg', 'George_W_Bush_0130.jpg', 'George_W_Bush_0124.jpg', 'George_W_Bush_0118.jpg', 'George_W_Bush_0454.jpg', 'George_W_Bush_0332.jpg', 'George_W_Bush_0326.jpg', 'George_W_Bush_0440.jpg', 'George_W_Bush_0468.jpg', 'George_W_Bush_0497.jpg', 'George_W_Bush_0483.jpg', 'George_W_Bush_0495.jpg', 'George_W_Bush_0481.jpg', 'George_W_Bush_0318.jpg', 'George_W_Bush_0330.jpg', 'George_W_Bush_0456.jpg', 'George_W_Bush_0442.jpg', 'George_W_Bush_0324.jpg', 'George_W_Bush_0132.jpg', 'George_W_Bush_0126.jpg', 'George_W_Bush_0085.jpg', 'George_W_Bush_0091.jpg', 'George_W_Bush_0046.jpg', 'George_W_Bush_0052.jpg', 'George_W_Bush_0287.jpg', 'George_W_Bush_0293.jpg', 'George_W_Bush_0278.jpg', 'George_W_Bush_0244.jpg', 'George_W_Bush_0522.jpg', 'George_W_Bush_0250.jpg', 'George_W_Bush_0251.jpg', 'George_W_Bush_0245.jpg', 'George_W_Bush_0523.jpg', 'George_W_Bush_0279.jpg', 'George_W_Bush_0292.jpg', 'George_W_Bush_0286.jpg', 'George_W_Bush_0053.jpg', 'George_W_Bush_0047.jpg', 'George_W_Bush_0090.jpg', 'George_W_Bush_0084.jpg', 'George_W_Bush_0127.jpg', 'George_W_Bush_0133.jpg', 'George_W_Bush_0443.jpg', 'George_W_Bush_0325.jpg', 'George_W_Bush_0331.jpg', 'George_W_Bush_0457.jpg', 'George_W_Bush_0319.jpg', 'George_W_Bush_0480.jpg', 'George_W_Bush_0494.jpg', 'George_W_Bush_0490.jpg', 'George_W_Bush_0484.jpg', 'George_W_Bush_0453.jpg', 'George_W_Bush_0335.jpg', 'George_W_Bush_0321.jpg', 'George_W_Bush_0447.jpg', 'George_W_Bush_0309.jpg', 'George_W_Bush_0137.jpg', 'George_W_Bush_0123.jpg', 'George_W_Bush_0080.jpg', 'George_W_Bush_0094.jpg', 'George_W_Bush_0043.jpg', 'George_W_Bush_0057.jpg', 'George_W_Bush_0282.jpg', 'George_W_Bush_0296.jpg', 'George_W_Bush_0527.jpg', 'George_W_Bush_0241.jpg', 'George_W_Bush_0255.jpg', 'George_W_Bush_0269.jpg', 'George_W_Bush_0268.jpg', 'George_W_Bush_0254.jpg', 'George_W_Bush_0526.jpg', 'George_W_Bush_0240.jpg', 'George_W_Bush_0297.jpg', 'George_W_Bush_0283.jpg', 'George_W_Bush_0056.jpg', 'George_W_Bush_0042.jpg', 'George_W_Bush_0095.jpg', 'George_W_Bush_0081.jpg', 'George_W_Bush_0122.jpg', 'George_W_Bush_0136.jpg', 'George_W_Bush_0308.jpg', 'George_W_Bush_0320.jpg', 'George_W_Bush_0446.jpg', 'George_W_Bush_0452.jpg', 
'George_W_Bush_0334.jpg', 'George_W_Bush_0485.jpg', 'George_W_Bush_0491.jpg', 'George_W_Bush_0487.jpg', 'George_W_Bush_0493.jpg', 'George_W_Bush_0444.jpg', 'George_W_Bush_0322.jpg', 'George_W_Bush_0336.jpg', 'George_W_Bush_0450.jpg', 'George_W_Bush_0478.jpg', 'George_W_Bush_0120.jpg', 'George_W_Bush_0134.jpg', 'George_W_Bush_0108.jpg', 'George_W_Bush_0097.jpg', 'George_W_Bush_0083.jpg', 'George_W_Bush_0054.jpg', 'George_W_Bush_0040.jpg', 'George_W_Bush_0068.jpg', 'George_W_Bush_0295.jpg', 'George_W_Bush_0281.jpg', 'George_W_Bush_0530.jpg', 'George_W_Bush_0256.jpg', 'George_W_Bush_0242.jpg', 'George_W_Bush_0524.jpg', 'George_W_Bush_0518.jpg', 'George_W_Bush_0519.jpg', 'George_W_Bush_0243.jpg', 'George_W_Bush_0525.jpg', 'George_W_Bush_0257.jpg', 'George_W_Bush_0280.jpg', 'George_W_Bush_0294.jpg', 'George_W_Bush_0069.jpg', 'George_W_Bush_0041.jpg', 'George_W_Bush_0055.jpg', 'George_W_Bush_0082.jpg', 'George_W_Bush_0096.jpg', 'George_W_Bush_0109.jpg', 'George_W_Bush_0135.jpg', 'George_W_Bush_0121.jpg', 'George_W_Bush_0479.jpg', 'George_W_Bush_0337.jpg', 'George_W_Bush_0451.jpg', 'George_W_Bush_0445.jpg', 'George_W_Bush_0323.jpg', 'George_W_Bush_0492.jpg', 'George_W_Bush_0486.jpg', 'George_W_Bush_0448.jpg', 'George_W_Bush_0306.jpg', 'George_W_Bush_0460.jpg', 'George_W_Bush_0474.jpg', 'George_W_Bush_0312.jpg', 'George_W_Bush_0138.jpg', 'George_W_Bush_0104.jpg', 'George_W_Bush_0110.jpg', 'George_W_Bush_0058.jpg', 'George_W_Bush_0070.jpg', 'George_W_Bush_0064.jpg', 'George_W_Bush_0299.jpg', 'George_W_Bush_0528.jpg', 'George_W_Bush_0272.jpg', 'George_W_Bush_0514.jpg', 'George_W_Bush_0500.jpg', 'George_W_Bush_0266.jpg', 'George_W_Bush_0501.jpg', 'George_W_Bush_0267.jpg', 'George_W_Bush_0273.jpg', 'George_W_Bush_0515.jpg', 'George_W_Bush_0529.jpg', 'George_W_Bush_0298.jpg', 'George_W_Bush_0065.jpg', 'George_W_Bush_0071.jpg', 'George_W_Bush_0059.jpg', 'George_W_Bush_0111.jpg', 'George_W_Bush_0105.jpg', 'George_W_Bush_0139.jpg', 'George_W_Bush_0475.jpg', 'George_W_Bush_0313.jpg', 'George_W_Bush_0307.jpg', 'George_W_Bush_0461.jpg', 'George_W_Bush_0449.jpg', 'George_W_Bush_0488.jpg', 'George_W_Bush_0339.jpg', 'George_W_Bush_0311.jpg', 'George_W_Bush_0477.jpg', 'George_W_Bush_0463.jpg', 'George_W_Bush_0305.jpg', 'George_W_Bush_0113.jpg', 'George_W_Bush_0107.jpg', 'George_W_Bush_0098.jpg', 'George_W_Bush_0067.jpg', 'George_W_Bush_0073.jpg', 'George_W_Bush_0259.jpg', 'George_W_Bush_0265.jpg', 'George_W_Bush_0503.jpg', 'George_W_Bush_0517.jpg', 'George_W_Bush_0271.jpg', 'George_W_Bush_0516.jpg', 'George_W_Bush_0270.jpg', 'George_W_Bush_0264.jpg', 'George_W_Bush_0502.jpg', 'George_W_Bush_0258.jpg', 'George_W_Bush_0072.jpg', 'George_W_Bush_0066.jpg', 'George_W_Bush_0099.jpg', 'George_W_Bush_0106.jpg', 'George_W_Bush_0112.jpg', 'George_W_Bush_0462.jpg', 'George_W_Bush_0304.jpg', 'George_W_Bush_0310.jpg', 'George_W_Bush_0476.jpg', 'George_W_Bush_0338.jpg', 'George_W_Bush_0489.jpg', 'George_W_Bush_0499.jpg', 'George_W_Bush_0472.jpg', 'George_W_Bush_0314.jpg', 'George_W_Bush_0300.jpg', 'George_W_Bush_0466.jpg', 'George_W_Bush_0328.jpg', 'George_W_Bush_0116.jpg', 'George_W_Bush_0102.jpg', 'George_W_Bush_0089.jpg', 'George_W_Bush_0062.jpg', 'George_W_Bush_0076.jpg', 'George_W_Bush_0506.jpg', 'George_W_Bush_0260.jpg', 'George_W_Bush_0274.jpg', 'George_W_Bush_0512.jpg', 'George_W_Bush_0248.jpg', 'George_W_Bush_0249.jpg', 'George_W_Bush_0275.jpg', 'George_W_Bush_0513.jpg', 'George_W_Bush_0507.jpg', 'George_W_Bush_0261.jpg', 'George_W_Bush_0077.jpg', 'George_W_Bush_0063.jpg', 'George_W_Bush_0088.jpg', 
'George_W_Bush_0103.jpg', 'George_W_Bush_0117.jpg', 'George_W_Bush_0329.jpg', 'George_W_Bush_0301.jpg', 'George_W_Bush_0467.jpg', 'George_W_Bush_0473.jpg', 'George_W_Bush_0315.jpg', 'George_W_Bush_0498.jpg', 'George_W_Bush_0465.jpg', 'George_W_Bush_0303.jpg', 'George_W_Bush_0317.jpg', 'George_W_Bush_0471.jpg', 'George_W_Bush_0459.jpg', 'George_W_Bush_0101.jpg', 'George_W_Bush_0115.jpg', 'George_W_Bush_0129.jpg', 'George_W_Bush_0075.jpg', 'George_W_Bush_0061.jpg', 'George_W_Bush_0049.jpg', 'George_W_Bush_0288.jpg', 'George_W_Bush_0511.jpg', 'George_W_Bush_0277.jpg', 'George_W_Bush_0263.jpg', 'George_W_Bush_0505.jpg', 'George_W_Bush_0262.jpg', 'George_W_Bush_0504.jpg', 'George_W_Bush_0510.jpg', 'George_W_Bush_0276.jpg', 'George_W_Bush_0289.jpg', 'George_W_Bush_0048.jpg', 'George_W_Bush_0060.jpg', 'George_W_Bush_0074.jpg', 'George_W_Bush_0128.jpg', 'George_W_Bush_0114.jpg', 'George_W_Bush_0100.jpg', 'George_W_Bush_0458.jpg', 'George_W_Bush_0316.jpg', 'George_W_Bush_0470.jpg', 'George_W_Bush_0464.jpg', 'George_W_Bush_0302.jpg', 'George_W_Bush_0359.jpg', 'George_W_Bush_0403.jpg', 'George_W_Bush_0365.jpg', 'George_W_Bush_0371.jpg', 'George_W_Bush_0417.jpg', 'George_W_Bush_0198.jpg', 'George_W_Bush_0167.jpg', 'George_W_Bush_0173.jpg', 'George_W_Bush_0013.jpg', 'George_W_Bush_0007.jpg', 'George_W_Bush_0239.jpg', 'George_W_Bush_0211.jpg', 'George_W_Bush_0205.jpg', 'George_W_Bush_0204.jpg', 'George_W_Bush_0210.jpg', 'George_W_Bush_0238.jpg', 'George_W_Bush_0006.jpg', 'George_W_Bush_0012.jpg', 'George_W_Bush_0172.jpg', 'George_W_Bush_0166.jpg', 'George_W_Bush_0199.jpg', 'George_W_Bush_0370.jpg', 'George_W_Bush_0416.jpg', 'George_W_Bush_0402.jpg', 'George_W_Bush_0364.jpg', 'George_W_Bush_0358.jpg', 'George_W_Bush_0399.jpg', 'George_W_Bush_0428.jpg', 'George_W_Bush_0414.jpg', 'George_W_Bush_0372.jpg', 'George_W_Bush_0366.jpg', 'George_W_Bush_0400.jpg', 'George_W_Bush_0158.jpg', 'George_W_Bush_0170.jpg', 'George_W_Bush_0164.jpg', 'George_W_Bush_0038.jpg', 'George_W_Bush_0004.jpg', 'George_W_Bush_0010.jpg', 'George_W_Bush_0206.jpg', 'George_W_Bush_0212.jpg', 'George_W_Bush_0213.jpg', 'George_W_Bush_0207.jpg', 'George_W_Bush_0011.jpg', 'George_W_Bush_0005.jpg', 'George_W_Bush_0039.jpg', 'George_W_Bush_0165.jpg', 'George_W_Bush_0171.jpg', 'George_W_Bush_0159.jpg', 'George_W_Bush_0367.jpg', 'George_W_Bush_0401.jpg', 'George_W_Bush_0415.jpg', 'George_W_Bush_0373.jpg', 'George_W_Bush_0429.jpg', 'George_W_Bush_0398.jpg', 'George_W_Bush_0388.jpg', 'George_W_Bush_0377.jpg', 'George_W_Bush_0411.jpg', 'George_W_Bush_0405.jpg', 'George_W_Bush_0363.jpg', 'George_W_Bush_0439.jpg', 'George_W_Bush_0175.jpg', 'George_W_Bush_0161.jpg', 'George_W_Bush_0149.jpg', 'George_W_Bush_0001.jpg', 'George_W_Bush_0015.jpg', 'George_W_Bush_0029.jpg', 'George_W_Bush_0203.jpg', 'George_W_Bush_0217.jpg', 'George_W_Bush_0216.jpg', 'George_W_Bush_0202.jpg', 'George_W_Bush_0028.jpg', 'George_W_Bush_0014.jpg', 'George_W_Bush_0148.jpg', 'George_W_Bush_0160.jpg', 'George_W_Bush_0174.jpg', 'George_W_Bush_0438.jpg', 'George_W_Bush_0404.jpg', 'George_W_Bush_0362.jpg', 'George_W_Bush_0376.jpg', 'George_W_Bush_0410.jpg', 'George_W_Bush_0389.jpg', 'George_W_Bush_0360.jpg', 'George_W_Bush_0406.jpg', 'George_W_Bush_0412.jpg', 'George_W_Bush_0374.jpg', 'George_W_Bush_0348.jpg', 'George_W_Bush_0189.jpg', 'George_W_Bush_0162.jpg', 'George_W_Bush_0176.jpg', 'George_W_Bush_0016.jpg', 'George_W_Bush_0002.jpg', 'George_W_Bush_0214.jpg', 'George_W_Bush_0200.jpg', 'George_W_Bush_0228.jpg', 'George_W_Bush_0229.jpg', 'George_W_Bush_0201.jpg', 
'George_W_Bush_0215.jpg', 'George_W_Bush_0003.jpg', 'George_W_Bush_0017.jpg', 'George_W_Bush_0177.jpg', 'George_W_Bush_0163.jpg', 'George_W_Bush_0188.jpg', 'George_W_Bush_0349.jpg', 'George_W_Bush_0413.jpg', 'George_W_Bush_0375.jpg', 'George_W_Bush_0361.jpg', 'George_W_Bush_0407.jpg']\n\n\n\nGerhard_Schroeder\n['Gerhard_Schroeder_0095.jpg', 'Gerhard_Schroeder_0081.jpg', 'Gerhard_Schroeder_0056.jpg', 'Gerhard_Schroeder_0042.jpg', 'Gerhard_Schroeder_0043.jpg', 'Gerhard_Schroeder_0057.jpg', 'Gerhard_Schroeder_0080.jpg', 'Gerhard_Schroeder_0094.jpg', 'Gerhard_Schroeder_0109.jpg', 'Gerhard_Schroeder_0082.jpg', 'Gerhard_Schroeder_0096.jpg', 'Gerhard_Schroeder_0069.jpg', 'Gerhard_Schroeder_0041.jpg', 'Gerhard_Schroeder_0055.jpg', 'Gerhard_Schroeder_0054.jpg', 'Gerhard_Schroeder_0040.jpg', 'Gerhard_Schroeder_0068.jpg', 'Gerhard_Schroeder_0097.jpg', 'Gerhard_Schroeder_0083.jpg', 'Gerhard_Schroeder_0108.jpg', 'Gerhard_Schroeder_0087.jpg', 'Gerhard_Schroeder_0093.jpg', 'Gerhard_Schroeder_0044.jpg', 'Gerhard_Schroeder_0050.jpg', 'Gerhard_Schroeder_0078.jpg', 'Gerhard_Schroeder_0079.jpg', 'Gerhard_Schroeder_0051.jpg', 'Gerhard_Schroeder_0045.jpg', 'Gerhard_Schroeder_0092.jpg', 'Gerhard_Schroeder_0086.jpg', 'Gerhard_Schroeder_0090.jpg', 'Gerhard_Schroeder_0084.jpg', 'Gerhard_Schroeder_0053.jpg', 'Gerhard_Schroeder_0047.jpg', 'Gerhard_Schroeder_0046.jpg', 'Gerhard_Schroeder_0052.jpg', 'Gerhard_Schroeder_0085.jpg', 'Gerhard_Schroeder_0091.jpg', 'Gerhard_Schroeder_0009.jpg', 'Gerhard_Schroeder_0035.jpg', 'Gerhard_Schroeder_0021.jpg', 'Gerhard_Schroeder_0020.jpg', 'Gerhard_Schroeder_0034.jpg', 'Gerhard_Schroeder_0008.jpg', 'Gerhard_Schroeder_0022.jpg', 'Gerhard_Schroeder_0036.jpg', 'Gerhard_Schroeder_0037.jpg', 'Gerhard_Schroeder_0023.jpg', 'Gerhard_Schroeder_0027.jpg', 'Gerhard_Schroeder_0033.jpg', 'Gerhard_Schroeder_0032.jpg', 'Gerhard_Schroeder_0026.jpg', 'Gerhard_Schroeder_0030.jpg', 'Gerhard_Schroeder_0024.jpg', 'Gerhard_Schroeder_0018.jpg', 'Gerhard_Schroeder_0019.jpg', 'Gerhard_Schroeder_0025.jpg', 'Gerhard_Schroeder_0031.jpg', 'Gerhard_Schroeder_0028.jpg', 'Gerhard_Schroeder_0014.jpg', 'Gerhard_Schroeder_0001.jpg', 'Gerhard_Schroeder_0015.jpg', 'Gerhard_Schroeder_0029.jpg', 'Gerhard_Schroeder_0003.jpg', 'Gerhard_Schroeder_0017.jpg', 'Gerhard_Schroeder_0016.jpg', 'Gerhard_Schroeder_0002.jpg', 'Gerhard_Schroeder_0006.jpg', 'Gerhard_Schroeder_0012.jpg', 'Gerhard_Schroeder_0013.jpg', 'Gerhard_Schroeder_0007.jpg', 'Gerhard_Schroeder_0011.jpg', 'Gerhard_Schroeder_0005.jpg', 'Gerhard_Schroeder_0039.jpg', 'Gerhard_Schroeder_0038.jpg', 'Gerhard_Schroeder_0004.jpg', 'Gerhard_Schroeder_0010.jpg', 'Gerhard_Schroeder_0103.jpg', 'Gerhard_Schroeder_0088.jpg', 'Gerhard_Schroeder_0077.jpg', 'Gerhard_Schroeder_0063.jpg', 'Gerhard_Schroeder_0062.jpg', 'Gerhard_Schroeder_0076.jpg', 'Gerhard_Schroeder_0089.jpg', 'Gerhard_Schroeder_0102.jpg', 'Gerhard_Schroeder_0100.jpg', 'Gerhard_Schroeder_0048.jpg', 'Gerhard_Schroeder_0060.jpg', 'Gerhard_Schroeder_0074.jpg', 'Gerhard_Schroeder_0075.jpg', 'Gerhard_Schroeder_0061.jpg', 'Gerhard_Schroeder_0049.jpg', 'Gerhard_Schroeder_0101.jpg', 'Gerhard_Schroeder_0105.jpg', 'Gerhard_Schroeder_0065.jpg', 'Gerhard_Schroeder_0071.jpg', 'Gerhard_Schroeder_0059.jpg', 'Gerhard_Schroeder_0058.jpg', 'Gerhard_Schroeder_0070.jpg', 'Gerhard_Schroeder_0064.jpg', 'Gerhard_Schroeder_0104.jpg', 'Gerhard_Schroeder_0106.jpg', 'Gerhard_Schroeder_0099.jpg', 'Gerhard_Schroeder_0072.jpg', 'Gerhard_Schroeder_0066.jpg', 'Gerhard_Schroeder_0067.jpg', 'Gerhard_Schroeder_0073.jpg', 'Gerhard_Schroeder_0098.jpg', 
'Gerhard_Schroeder_0107.jpg']\n\n\n\nHugo_Chavez\n['Hugo_Chavez_0003.jpg', 'Hugo_Chavez_0017.jpg', 'Hugo_Chavez_0016.jpg', 'Hugo_Chavez_0002.jpg', 'Hugo_Chavez_0014.jpg', 'Hugo_Chavez_0028.jpg', 'Hugo_Chavez_0029.jpg', 'Hugo_Chavez_0001.jpg', 'Hugo_Chavez_0015.jpg', 'Hugo_Chavez_0039.jpg', 'Hugo_Chavez_0011.jpg', 'Hugo_Chavez_0005.jpg', 'Hugo_Chavez_0004.jpg', 'Hugo_Chavez_0010.jpg', 'Hugo_Chavez_0038.jpg', 'Hugo_Chavez_0006.jpg', 'Hugo_Chavez_0012.jpg', 'Hugo_Chavez_0013.jpg', 'Hugo_Chavez_0007.jpg', 'Hugo_Chavez_0060.jpg', 'Hugo_Chavez_0048.jpg', 'Hugo_Chavez_0049.jpg', 'Hugo_Chavez_0061.jpg', 'Hugo_Chavez_0063.jpg', 'Hugo_Chavez_0062.jpg', 'Hugo_Chavez_0066.jpg', 'Hugo_Chavez_0067.jpg', 'Hugo_Chavez_0059.jpg', 'Hugo_Chavez_0065.jpg', 'Hugo_Chavez_0071.jpg', 'Hugo_Chavez_0070.jpg', 'Hugo_Chavez_0064.jpg', 'Hugo_Chavez_0058.jpg', 'Hugo_Chavez_0041.jpg', 'Hugo_Chavez_0055.jpg', 'Hugo_Chavez_0069.jpg', 'Hugo_Chavez_0068.jpg', 'Hugo_Chavez_0054.jpg', 'Hugo_Chavez_0040.jpg', 'Hugo_Chavez_0056.jpg', 'Hugo_Chavez_0042.jpg', 'Hugo_Chavez_0043.jpg', 'Hugo_Chavez_0057.jpg', 'Hugo_Chavez_0053.jpg', 'Hugo_Chavez_0047.jpg', 'Hugo_Chavez_0046.jpg', 'Hugo_Chavez_0052.jpg', 'Hugo_Chavez_0044.jpg', 'Hugo_Chavez_0050.jpg', 'Hugo_Chavez_0051.jpg', 'Hugo_Chavez_0045.jpg', 'Hugo_Chavez_0022.jpg', 'Hugo_Chavez_0036.jpg', 'Hugo_Chavez_0037.jpg', 'Hugo_Chavez_0023.jpg', 'Hugo_Chavez_0035.jpg', 'Hugo_Chavez_0021.jpg', 'Hugo_Chavez_0009.jpg', 'Hugo_Chavez_0008.jpg', 'Hugo_Chavez_0020.jpg', 'Hugo_Chavez_0034.jpg', 'Hugo_Chavez_0018.jpg', 'Hugo_Chavez_0030.jpg', 'Hugo_Chavez_0024.jpg', 'Hugo_Chavez_0025.jpg', 'Hugo_Chavez_0031.jpg', 'Hugo_Chavez_0019.jpg', 'Hugo_Chavez_0027.jpg', 'Hugo_Chavez_0033.jpg', 'Hugo_Chavez_0032.jpg', 'Hugo_Chavez_0026.jpg']\n\n\n\nJacques_Chirac\n['Jacques_Chirac_0043.jpg', 'Jacques_Chirac_0042.jpg', 'Jacques_Chirac_0040.jpg', 'Jacques_Chirac_0041.jpg', 'Jacques_Chirac_0045.jpg', 'Jacques_Chirac_0051.jpg', 'Jacques_Chirac_0050.jpg', 'Jacques_Chirac_0044.jpg', 'Jacques_Chirac_0052.jpg', 'Jacques_Chirac_0046.jpg', 'Jacques_Chirac_0047.jpg', 'Jacques_Chirac_0034.jpg', 'Jacques_Chirac_0020.jpg', 'Jacques_Chirac_0008.jpg', 'Jacques_Chirac_0009.jpg', 'Jacques_Chirac_0021.jpg', 'Jacques_Chirac_0035.jpg', 'Jacques_Chirac_0023.jpg', 'Jacques_Chirac_0037.jpg', 'Jacques_Chirac_0036.jpg', 'Jacques_Chirac_0022.jpg', 'Jacques_Chirac_0026.jpg', 'Jacques_Chirac_0032.jpg', 'Jacques_Chirac_0033.jpg', 'Jacques_Chirac_0027.jpg', 'Jacques_Chirac_0019.jpg', 'Jacques_Chirac_0031.jpg', 'Jacques_Chirac_0025.jpg', 'Jacques_Chirac_0024.jpg', 'Jacques_Chirac_0030.jpg', 'Jacques_Chirac_0018.jpg', 'Jacques_Chirac_0015.jpg', 'Jacques_Chirac_0001.jpg', 'Jacques_Chirac_0029.jpg', 'Jacques_Chirac_0028.jpg', 'Jacques_Chirac_0014.jpg', 'Jacques_Chirac_0002.jpg', 'Jacques_Chirac_0016.jpg', 'Jacques_Chirac_0017.jpg', 'Jacques_Chirac_0003.jpg', 'Jacques_Chirac_0007.jpg', 'Jacques_Chirac_0013.jpg', 'Jacques_Chirac_0012.jpg', 'Jacques_Chirac_0006.jpg', 'Jacques_Chirac_0038.jpg', 'Jacques_Chirac_0010.jpg', 'Jacques_Chirac_0004.jpg', 'Jacques_Chirac_0005.jpg', 'Jacques_Chirac_0011.jpg', 'Jacques_Chirac_0039.jpg', 'Jacques_Chirac_0049.jpg', 'Jacques_Chirac_0048.jpg']\n\n\n\nJean_Chretien\n['Jean_Chretien_0026.jpg', 'Jean_Chretien_0032.jpg', 'Jean_Chretien_0033.jpg', 'Jean_Chretien_0027.jpg', 'Jean_Chretien_0031.jpg', 'Jean_Chretien_0025.jpg', 'Jean_Chretien_0019.jpg', 'Jean_Chretien_0018.jpg', 'Jean_Chretien_0024.jpg', 'Jean_Chretien_0030.jpg', 'Jean_Chretien_0008.jpg', 'Jean_Chretien_0034.jpg', 'Jean_Chretien_0020.jpg', 
'Jean_Chretien_0021.jpg', 'Jean_Chretien_0035.jpg', 'Jean_Chretien_0009.jpg', 'Jean_Chretien_0023.jpg', 'Jean_Chretien_0037.jpg', 'Jean_Chretien_0036.jpg', 'Jean_Chretien_0022.jpg', 'Jean_Chretien_0045.jpg', 'Jean_Chretien_0051.jpg', 'Jean_Chretien_0050.jpg', 'Jean_Chretien_0044.jpg', 'Jean_Chretien_0052.jpg', 'Jean_Chretien_0046.jpg', 'Jean_Chretien_0047.jpg', 'Jean_Chretien_0053.jpg', 'Jean_Chretien_0043.jpg', 'Jean_Chretien_0042.jpg', 'Jean_Chretien_0040.jpg', 'Jean_Chretien_0054.jpg', 'Jean_Chretien_0055.jpg', 'Jean_Chretien_0041.jpg', 'Jean_Chretien_0049.jpg', 'Jean_Chretien_0048.jpg', 'Jean_Chretien_0007.jpg', 'Jean_Chretien_0013.jpg', 'Jean_Chretien_0012.jpg', 'Jean_Chretien_0006.jpg', 'Jean_Chretien_0010.jpg', 'Jean_Chretien_0004.jpg', 'Jean_Chretien_0038.jpg', 'Jean_Chretien_0039.jpg', 'Jean_Chretien_0005.jpg', 'Jean_Chretien_0011.jpg', 'Jean_Chretien_0029.jpg', 'Jean_Chretien_0015.jpg', 'Jean_Chretien_0001.jpg', 'Jean_Chretien_0014.jpg', 'Jean_Chretien_0028.jpg', 'Jean_Chretien_0002.jpg', 'Jean_Chretien_0016.jpg', 'Jean_Chretien_0017.jpg', 'Jean_Chretien_0003.jpg']\n\n\n\nJohn_Ashcroft\n['John_Ashcroft_0038.jpg', 'John_Ashcroft_0004.jpg', 'John_Ashcroft_0010.jpg', 'John_Ashcroft_0011.jpg', 'John_Ashcroft_0005.jpg', 'John_Ashcroft_0039.jpg', 'John_Ashcroft_0013.jpg', 'John_Ashcroft_0007.jpg', 'John_Ashcroft_0006.jpg', 'John_Ashcroft_0012.jpg', 'John_Ashcroft_0016.jpg', 'John_Ashcroft_0002.jpg', 'John_Ashcroft_0003.jpg', 'John_Ashcroft_0017.jpg', 'John_Ashcroft_0001.jpg', 'John_Ashcroft_0015.jpg', 'John_Ashcroft_0029.jpg', 'John_Ashcroft_0028.jpg', 'John_Ashcroft_0014.jpg', 'John_Ashcroft_0049.jpg', 'John_Ashcroft_0048.jpg', 'John_Ashcroft_0046.jpg', 'John_Ashcroft_0052.jpg', 'John_Ashcroft_0053.jpg', 'John_Ashcroft_0047.jpg', 'John_Ashcroft_0051.jpg', 'John_Ashcroft_0045.jpg', 'John_Ashcroft_0044.jpg', 'John_Ashcroft_0050.jpg', 'John_Ashcroft_0040.jpg', 'John_Ashcroft_0041.jpg', 'John_Ashcroft_0043.jpg', 'John_Ashcroft_0042.jpg', 'John_Ashcroft_0019.jpg', 'John_Ashcroft_0025.jpg', 'John_Ashcroft_0031.jpg', 'John_Ashcroft_0030.jpg', 'John_Ashcroft_0024.jpg', 'John_Ashcroft_0018.jpg', 'John_Ashcroft_0032.jpg', 'John_Ashcroft_0026.jpg', 'John_Ashcroft_0027.jpg', 'John_Ashcroft_0033.jpg', 'John_Ashcroft_0037.jpg', 'John_Ashcroft_0023.jpg', 'John_Ashcroft_0022.jpg', 'John_Ashcroft_0036.jpg', 'John_Ashcroft_0020.jpg', 'John_Ashcroft_0034.jpg', 'John_Ashcroft_0008.jpg', 'John_Ashcroft_0009.jpg', 'John_Ashcroft_0035.jpg', 'John_Ashcroft_0021.jpg']\n\n\n\nJunichiro_Koizumi\n['Junichiro_Koizumi_0045.jpg', 'Junichiro_Koizumi_0051.jpg', 'Junichiro_Koizumi_0050.jpg', 'Junichiro_Koizumi_0044.jpg', 'Junichiro_Koizumi_0052.jpg', 'Junichiro_Koizumi_0046.jpg', 'Junichiro_Koizumi_0047.jpg', 'Junichiro_Koizumi_0053.jpg', 'Junichiro_Koizumi_0057.jpg', 'Junichiro_Koizumi_0043.jpg', 'Junichiro_Koizumi_0042.jpg', 'Junichiro_Koizumi_0056.jpg', 'Junichiro_Koizumi_0040.jpg', 'Junichiro_Koizumi_0054.jpg', 'Junichiro_Koizumi_0055.jpg', 'Junichiro_Koizumi_0041.jpg', 'Junichiro_Koizumi_0026.jpg', 'Junichiro_Koizumi_0032.jpg', 'Junichiro_Koizumi_0033.jpg', 'Junichiro_Koizumi_0027.jpg', 'Junichiro_Koizumi_0031.jpg', 'Junichiro_Koizumi_0025.jpg', 'Junichiro_Koizumi_0019.jpg', 'Junichiro_Koizumi_0018.jpg', 'Junichiro_Koizumi_0024.jpg', 'Junichiro_Koizumi_0030.jpg', 'Junichiro_Koizumi_0008.jpg', 'Junichiro_Koizumi_0034.jpg', 'Junichiro_Koizumi_0020.jpg', 'Junichiro_Koizumi_0021.jpg', 'Junichiro_Koizumi_0035.jpg', 'Junichiro_Koizumi_0009.jpg', 'Junichiro_Koizumi_0023.jpg', 'Junichiro_Koizumi_0037.jpg', 
'Junichiro_Koizumi_0036.jpg', 'Junichiro_Koizumi_0022.jpg', 'Junichiro_Koizumi_0007.jpg', 'Junichiro_Koizumi_0013.jpg', 'Junichiro_Koizumi_0012.jpg', 'Junichiro_Koizumi_0006.jpg', 'Junichiro_Koizumi_0010.jpg', 'Junichiro_Koizumi_0004.jpg', 'Junichiro_Koizumi_0038.jpg', 'Junichiro_Koizumi_0039.jpg', 'Junichiro_Koizumi_0005.jpg', 'Junichiro_Koizumi_0011.jpg', 'Junichiro_Koizumi_0029.jpg', 'Junichiro_Koizumi_0015.jpg', 'Junichiro_Koizumi_0001.jpg', 'Junichiro_Koizumi_0014.jpg', 'Junichiro_Koizumi_0028.jpg', 'Junichiro_Koizumi_0002.jpg', 'Junichiro_Koizumi_0016.jpg', 'Junichiro_Koizumi_0017.jpg', 'Junichiro_Koizumi_0003.jpg', 'Junichiro_Koizumi_0058.jpg', 'Junichiro_Koizumi_0059.jpg', 'Junichiro_Koizumi_0049.jpg', 'Junichiro_Koizumi_0060.jpg', 'Junichiro_Koizumi_0048.jpg']\n\n\n\nSerena_Williams\n['Serena_Williams_0012.jpg', 'Serena_Williams_0006.jpg', 'Serena_Williams_0007.jpg', 'Serena_Williams_0013.jpg', 'Serena_Williams_0005.jpg', 'Serena_Williams_0011.jpg', 'Serena_Williams_0039.jpg', 'Serena_Williams_0038.jpg', 'Serena_Williams_0010.jpg', 'Serena_Williams_0004.jpg', 'Serena_Williams_0028.jpg', 'Serena_Williams_0014.jpg', 'Serena_Williams_0015.jpg', 'Serena_Williams_0001.jpg', 'Serena_Williams_0029.jpg', 'Serena_Williams_0017.jpg', 'Serena_Williams_0003.jpg', 'Serena_Williams_0002.jpg', 'Serena_Williams_0016.jpg', 'Serena_Williams_0048.jpg', 'Serena_Williams_0049.jpg', 'Serena_Williams_0050.jpg', 'Serena_Williams_0044.jpg', 'Serena_Williams_0045.jpg', 'Serena_Williams_0051.jpg', 'Serena_Williams_0047.jpg', 'Serena_Williams_0052.jpg', 'Serena_Williams_0046.jpg', 'Serena_Williams_0042.jpg', 'Serena_Williams_0043.jpg', 'Serena_Williams_0041.jpg', 'Serena_Williams_0040.jpg', 'Serena_Williams_0033.jpg', 'Serena_Williams_0027.jpg', 'Serena_Williams_0026.jpg', 'Serena_Williams_0032.jpg', 'Serena_Williams_0024.jpg', 'Serena_Williams_0030.jpg', 'Serena_Williams_0018.jpg', 'Serena_Williams_0019.jpg', 'Serena_Williams_0031.jpg', 'Serena_Williams_0025.jpg', 'Serena_Williams_0009.jpg', 'Serena_Williams_0021.jpg', 'Serena_Williams_0035.jpg', 'Serena_Williams_0034.jpg', 'Serena_Williams_0020.jpg', 'Serena_Williams_0008.jpg', 'Serena_Williams_0036.jpg', 'Serena_Williams_0022.jpg', 'Serena_Williams_0023.jpg', 'Serena_Williams_0037.jpg']\n\n\n\nTony_Blair\n['Tony_Blair_0002.jpg', 'Tony_Blair_0016.jpg', 'Tony_Blair_0017.jpg', 'Tony_Blair_0003.jpg', 'Tony_Blair_0015.jpg', 'Tony_Blair_0001.jpg', 'Tony_Blair_0029.jpg', 'Tony_Blair_0028.jpg', 'Tony_Blair_0014.jpg', 'Tony_Blair_0038.jpg', 'Tony_Blair_0010.jpg', 'Tony_Blair_0004.jpg', 'Tony_Blair_0005.jpg', 'Tony_Blair_0011.jpg', 'Tony_Blair_0039.jpg', 'Tony_Blair_0007.jpg', 'Tony_Blair_0013.jpg', 'Tony_Blair_0012.jpg', 'Tony_Blair_0006.jpg', 'Tony_Blair_0061.jpg', 'Tony_Blair_0075.jpg', 'Tony_Blair_0049.jpg', 'Tony_Blair_0115.jpg', 'Tony_Blair_0101.jpg', 'Tony_Blair_0129.jpg', 'Tony_Blair_0128.jpg', 'Tony_Blair_0100.jpg', 'Tony_Blair_0114.jpg', 'Tony_Blair_0048.jpg', 'Tony_Blair_0074.jpg', 'Tony_Blair_0060.jpg', 'Tony_Blair_0089.jpg', 'Tony_Blair_0076.jpg', 'Tony_Blair_0062.jpg', 'Tony_Blair_0102.jpg', 'Tony_Blair_0116.jpg', 'Tony_Blair_0117.jpg', 'Tony_Blair_0103.jpg', 'Tony_Blair_0063.jpg', 'Tony_Blair_0077.jpg', 'Tony_Blair_0088.jpg', 'Tony_Blair_0098.jpg', 'Tony_Blair_0073.jpg', 'Tony_Blair_0067.jpg', 'Tony_Blair_0107.jpg', 'Tony_Blair_0113.jpg', 'Tony_Blair_0112.jpg', 'Tony_Blair_0106.jpg', 'Tony_Blair_0066.jpg', 'Tony_Blair_0072.jpg', 'Tony_Blair_0099.jpg', 'Tony_Blair_0058.jpg', 'Tony_Blair_0064.jpg', 'Tony_Blair_0070.jpg', 'Tony_Blair_0138.jpg', 
'Tony_Blair_0110.jpg', 'Tony_Blair_0104.jpg', 'Tony_Blair_0105.jpg', 'Tony_Blair_0111.jpg', 'Tony_Blair_0139.jpg', 'Tony_Blair_0071.jpg', 'Tony_Blair_0065.jpg', 'Tony_Blair_0059.jpg', 'Tony_Blair_0083.jpg', 'Tony_Blair_0097.jpg', 'Tony_Blair_0040.jpg', 'Tony_Blair_0054.jpg', 'Tony_Blair_0068.jpg', 'Tony_Blair_0134.jpg', 'Tony_Blair_0120.jpg', 'Tony_Blair_0108.jpg', 'Tony_Blair_0109.jpg', 'Tony_Blair_0121.jpg', 'Tony_Blair_0135.jpg', 'Tony_Blair_0069.jpg', 'Tony_Blair_0055.jpg', 'Tony_Blair_0041.jpg', 'Tony_Blair_0096.jpg', 'Tony_Blair_0082.jpg', 'Tony_Blair_0094.jpg', 'Tony_Blair_0080.jpg', 'Tony_Blair_0057.jpg', 'Tony_Blair_0043.jpg', 'Tony_Blair_0123.jpg', 'Tony_Blair_0137.jpg', 'Tony_Blair_0136.jpg', 'Tony_Blair_0122.jpg', 'Tony_Blair_0042.jpg', 'Tony_Blair_0056.jpg', 'Tony_Blair_0081.jpg', 'Tony_Blair_0095.jpg', 'Tony_Blair_0091.jpg', 'Tony_Blair_0085.jpg', 'Tony_Blair_0052.jpg', 'Tony_Blair_0046.jpg', 'Tony_Blair_0126.jpg', 'Tony_Blair_0132.jpg', 'Tony_Blair_0133.jpg', 'Tony_Blair_0127.jpg', 'Tony_Blair_0047.jpg', 'Tony_Blair_0053.jpg', 'Tony_Blair_0084.jpg', 'Tony_Blair_0090.jpg', 'Tony_Blair_0086.jpg', 'Tony_Blair_0092.jpg', 'Tony_Blair_0079.jpg', 'Tony_Blair_0045.jpg', 'Tony_Blair_0051.jpg', 'Tony_Blair_0119.jpg', 'Tony_Blair_0131.jpg', 'Tony_Blair_0125.jpg', 'Tony_Blair_0124.jpg', 'Tony_Blair_0130.jpg', 'Tony_Blair_0118.jpg', 'Tony_Blair_0050.jpg', 'Tony_Blair_0044.jpg', 'Tony_Blair_0078.jpg', 'Tony_Blair_0093.jpg', 'Tony_Blair_0087.jpg', 'Tony_Blair_0023.jpg', 'Tony_Blair_0037.jpg', 'Tony_Blair_0143.jpg', 'Tony_Blair_0142.jpg', 'Tony_Blair_0036.jpg', 'Tony_Blair_0022.jpg', 'Tony_Blair_0034.jpg', 'Tony_Blair_0020.jpg', 'Tony_Blair_0008.jpg', 'Tony_Blair_0140.jpg', 'Tony_Blair_0141.jpg', 'Tony_Blair_0009.jpg', 'Tony_Blair_0021.jpg', 'Tony_Blair_0035.jpg', 'Tony_Blair_0019.jpg', 'Tony_Blair_0031.jpg', 'Tony_Blair_0025.jpg', 'Tony_Blair_0144.jpg', 'Tony_Blair_0024.jpg', 'Tony_Blair_0030.jpg', 'Tony_Blair_0018.jpg', 'Tony_Blair_0026.jpg', 'Tony_Blair_0032.jpg', 'Tony_Blair_0033.jpg', 'Tony_Blair_0027.jpg']\n\n\n\n"
],
[
"image = cv2.imread(Dataset_path+'lfw-deepfunneled/Colin_Powell/Colin_Powell_0007.jpg')\nfaces = return_bbx(image)\n(x,y,w,h) = faces[0]\ncropped = image[x:x+w, y:y+h]\nplt.imshow(cropped)\nprint(cropped.shape)",
"(112, 112, 3)\n"
],
[
"resized = cv2.resize(cropped, (64,64), interpolation = cv2.INTER_AREA)\nplt.imshow(resized)\nprint(resized.shape)",
"(64, 64, 3)\n"
],
[
"X = []\nY = []\n\nfor _, [name,__] in Celebs.iterrows():\n celeb_path = Dataset_path+'lfw-deepfunneled/'+name+'/'\n \n images_paths = get_files(celeb_path)\n for image_path in images_paths:\n image = cv2.imread(celeb_path+image_path,1)\n faces = return_bbx(image)\n if len(faces) == 1:\n (x,y,w,h) = faces[0]\n cropped = image[x:x+w, y:y+h]\n dim = (64, 64)\n resized = cv2.resize(cropped, dim, interpolation = cv2.INTER_AREA)\n image = np.array(resized).astype(\"float32\")\n X.append(image)\n Y.append(name)\n\nX_data = np.array(X)\nY_data = np.array(Y)",
"_____no_output_____"
],
[
"X_data = np.array(X)\nY_data = np.array(Y)\n\nprint(X_data.shape)\nprint(Y_data.shape)",
"(1431, 64, 64, 3)\n(1431,)\n"
],
[
"import mahotas\nbins = 20\n\ndef fd_hu_moments(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n feature = cv2.HuMoments(cv2.moments(image)).flatten()\n return feature\n\ndef fd_haralick(image): # convert the image to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).astype(int)\n # compute the haralick texture feature vector\n haralick = mahotas.features.haralick(gray).mean(axis=0)\n return haralick\n\ndef fd_lbp(image): # convert the image to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).astype(int)\n # compute the haralick texture feature vector\n haralick = mahotas.features.lbp(gray, 5, 5).mean(axis=0)\n return haralick\n \ndef fd_histogram(image, mask=None):\n # convert the image to HSV color-space\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n # compute the color histogram\n hist = cv2.calcHist([image], [0, 1, 2], None, [bins, bins, bins], [0, 256, 0, 256, 0, 256])\n # normalize the histogram\n cv2.normalize(hist, hist)\n return hist.flatten()\n\ndef get_global_features(image):\n global_feature = np.hstack([fd_histogram(image), fd_haralick(image), fd_lbp(image), fd_hu_moments(image)])\n return global_feature",
"_____no_output_____"
],
[
"X_temp = []\nfor i in range(len(X_data)):\n X_temp.append(get_global_features(X_data[i]))",
"_____no_output_____"
],
[
"X_data = np.array(X_temp)\nprint(X_data.shape)\nprint(Y_data.shape)",
"(1431, 8021)\n(1431,)\n"
],
[
"from collections import Counter\n\ncounter = Counter(Y_data)\nprint(counter)",
"Counter({'George_W_Bush': 480, 'Colin_Powell': 213, 'Tony_Blair': 138, 'Donald_Rumsfeld': 109, 'Gerhard_Schroeder': 106, 'Ariel_Sharon': 70, 'Hugo_Chavez': 67, 'Jacques_Chirac': 51, 'Jean_Chretien': 51, 'Junichiro_Koizumi': 51, 'Serena_Williams': 48, 'John_Ashcroft': 47})\n"
],
[
"from imblearn.under_sampling import NearMiss\n\nundersample = NearMiss(version=1, n_neighbors=3)\nX_resampled, Y_resampled = undersample.fit_resample(X_data,Y_data)\nX_data = X_resampled\nY_data = Y_resampled\ncounter = Counter(Y_data)\nprint(counter)\n\ndel undersample\ndel X_resampled\ndel Y_resampled\ndel counter",
"Counter({'Ariel_Sharon': 47, 'Colin_Powell': 47, 'Donald_Rumsfeld': 47, 'George_W_Bush': 47, 'Gerhard_Schroeder': 47, 'Hugo_Chavez': 47, 'Jacques_Chirac': 47, 'Jean_Chretien': 47, 'John_Ashcroft': 47, 'Junichiro_Koizumi': 47, 'Serena_Williams': 47, 'Tony_Blair': 47})\n"
],
[
"print(X_data.shape)\nprint(Y_data.shape)",
"(564, 8021)\n(564,)\n"
],
[
"from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n\nscaler = MinMaxScaler(feature_range=(0, 1))\nX_data = scaler.fit_transform(X_data)\nlabelencoder = LabelEncoder()\nY_data = labelencoder.fit_transform(Y_data)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split,GridSearchCV\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom sklearn import svm\n\nX_train, X_test, y_train, y_test = train_test_split(X_data, Y_data, train_size=0.8, random_state = 0)",
"_____no_output_____"
],
[
"tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-1, 1e-2, 1e-3, 1e-4],\n 'C': [1, 10, 100, 1000]},\n {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},\n {'kernel': ['poly'], 'degree': [0, 1, 2, 3, 4, 5, 6],\n 'C': [1, 10, 100, 1000]},\n {'kernel': ['sigmoid'], 'C': [1, 10, 100, 1000]}]",
"_____no_output_____"
],
[
"scores = ['accuracy']\n\n\nfor score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n print()\n\n clf = GridSearchCV(svm.SVC(), tuned_parameters, scoring='%s' % score)\n clf.fit(X_train, y_train)\n\n print(\"Best parameters set found on development set:\")\n print()\n print(clf.best_params_)\n print()\n print(\"Grid scores on development set:\")\n print()\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\n print()\n\n print(\"Detailed classification report:\")\n print()\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n print()\n y_true, y_pred = y_test, clf.predict(X_test)\n print(classification_report(y_true, y_pred))\n print()",
"# Tuning hyper-parameters for accuracy\n\nBest parameters set found on development set:\n\n{'C': 100, 'gamma': 0.1, 'kernel': 'rbf'}\n\nGrid scores on development set:\n\n0.290 (+/-0.092) for {'C': 1, 'gamma': 0.1, 'kernel': 'rbf'}\n0.155 (+/-0.019) for {'C': 1, 'gamma': 0.01, 'kernel': 'rbf'}\n0.100 (+/-0.024) for {'C': 1, 'gamma': 0.001, 'kernel': 'rbf'}\n0.100 (+/-0.024) for {'C': 1, 'gamma': 0.0001, 'kernel': 'rbf'}\n0.308 (+/-0.145) for {'C': 10, 'gamma': 0.1, 'kernel': 'rbf'}\n0.295 (+/-0.086) for {'C': 10, 'gamma': 0.01, 'kernel': 'rbf'}\n0.155 (+/-0.019) for {'C': 10, 'gamma': 0.001, 'kernel': 'rbf'}\n0.100 (+/-0.024) for {'C': 10, 'gamma': 0.0001, 'kernel': 'rbf'}\n0.315 (+/-0.135) for {'C': 100, 'gamma': 0.1, 'kernel': 'rbf'}\n0.293 (+/-0.092) for {'C': 100, 'gamma': 0.01, 'kernel': 'rbf'}\n0.304 (+/-0.090) for {'C': 100, 'gamma': 0.001, 'kernel': 'rbf'}\n0.155 (+/-0.019) for {'C': 100, 'gamma': 0.0001, 'kernel': 'rbf'}\n0.315 (+/-0.135) for {'C': 1000, 'gamma': 0.1, 'kernel': 'rbf'}\n0.293 (+/-0.116) for {'C': 1000, 'gamma': 0.01, 'kernel': 'rbf'}\n0.295 (+/-0.112) for {'C': 1000, 'gamma': 0.001, 'kernel': 'rbf'}\n0.308 (+/-0.099) for {'C': 1000, 'gamma': 0.0001, 'kernel': 'rbf'}\n0.286 (+/-0.083) for {'C': 1, 'kernel': 'linear'}\n0.288 (+/-0.146) for {'C': 10, 'kernel': 'linear'}\n0.284 (+/-0.139) for {'C': 100, 'kernel': 'linear'}\n0.284 (+/-0.139) for {'C': 1000, 'kernel': 'linear'}\n0.089 (+/-0.001) for {'C': 1, 'degree': 0, 'kernel': 'poly'}\n0.284 (+/-0.100) for {'C': 1, 'degree': 1, 'kernel': 'poly'}\n0.284 (+/-0.086) for {'C': 1, 'degree': 2, 'kernel': 'poly'}\n0.270 (+/-0.073) for {'C': 1, 'degree': 3, 'kernel': 'poly'}\n0.239 (+/-0.037) for {'C': 1, 'degree': 4, 'kernel': 'poly'}\n0.228 (+/-0.039) for {'C': 1, 'degree': 5, 'kernel': 'poly'}\n0.208 (+/-0.049) for {'C': 1, 'degree': 6, 'kernel': 'poly'}\n0.089 (+/-0.001) for {'C': 10, 'degree': 0, 'kernel': 'poly'}\n0.282 (+/-0.100) for {'C': 10, 'degree': 1, 'kernel': 'poly'}\n0.302 (+/-0.106) for {'C': 10, 'degree': 2, 'kernel': 'poly'}\n0.288 (+/-0.128) for {'C': 10, 'degree': 3, 'kernel': 'poly'}\n0.282 (+/-0.128) for {'C': 10, 'degree': 4, 'kernel': 'poly'}\n0.279 (+/-0.143) for {'C': 10, 'degree': 5, 'kernel': 'poly'}\n0.268 (+/-0.134) for {'C': 10, 'degree': 6, 'kernel': 'poly'}\n0.089 (+/-0.001) for {'C': 100, 'degree': 0, 'kernel': 'poly'}\n0.286 (+/-0.138) for {'C': 100, 'degree': 1, 'kernel': 'poly'}\n0.304 (+/-0.119) for {'C': 100, 'degree': 2, 'kernel': 'poly'}\n0.304 (+/-0.142) for {'C': 100, 'degree': 3, 'kernel': 'poly'}\n0.295 (+/-0.130) for {'C': 100, 'degree': 4, 'kernel': 'poly'}\n0.286 (+/-0.159) for {'C': 100, 'degree': 5, 'kernel': 'poly'}\n0.282 (+/-0.159) for {'C': 100, 'degree': 6, 'kernel': 'poly'}\n0.089 (+/-0.001) for {'C': 1000, 'degree': 0, 'kernel': 'poly'}\n0.284 (+/-0.139) for {'C': 1000, 'degree': 1, 'kernel': 'poly'}\n0.304 (+/-0.119) for {'C': 1000, 'degree': 2, 'kernel': 'poly'}\n0.304 (+/-0.142) for {'C': 1000, 'degree': 3, 'kernel': 'poly'}\n0.295 (+/-0.130) for {'C': 1000, 'degree': 4, 'kernel': 'poly'}\n0.286 (+/-0.159) for {'C': 1000, 'degree': 5, 'kernel': 'poly'}\n0.282 (+/-0.159) for {'C': 1000, 'degree': 6, 'kernel': 'poly'}\n0.259 (+/-0.065) for {'C': 1, 'kernel': 'sigmoid'}\n0.284 (+/-0.099) for {'C': 10, 'kernel': 'sigmoid'}\n0.246 (+/-0.102) for {'C': 100, 'kernel': 'sigmoid'}\n0.228 (+/-0.075) for {'C': 1000, 'kernel': 'sigmoid'}\n\nDetailed classification report:\n\nThe model is trained on the full development set.\nThe scores are computed on the full evaluation 
set.\n\n precision recall f1-score support\n\n 0 0.27 0.38 0.32 8\n 1 0.38 0.43 0.40 7\n 2 0.10 0.12 0.11 8\n 3 0.29 0.22 0.25 9\n 4 0.08 0.11 0.10 9\n 5 0.43 0.60 0.50 10\n 6 0.40 0.22 0.29 9\n 7 0.25 0.22 0.24 9\n 8 0.29 0.11 0.16 18\n 9 0.15 0.29 0.20 7\n 10 0.82 1.00 0.90 9\n 11 0.29 0.20 0.24 10\n\n accuracy 0.31 113\n macro avg 0.31 0.33 0.31 113\nweighted avg 0.31 0.31 0.30 113\n\n\n"
],
[
"rbf = svm.SVC(kernel='rbf', gamma=0.1, C=100).fit(X_train, y_train)\naccuracy_rbf = rbf.score(X_train, y_train)\nprint(\"Training Accuracy Radial Basis Kernel:\", accuracy_rbf*100)\naccuracy_rbf = rbf.score(X_test, y_test)\nprint(\"Testing Accuracy Radial Basis Kernel:\", accuracy_rbf*100)",
"Training Accuracy Radial Basis Kernel: 100.0\nTesting Accuracy Radial Basis Kernel: 30.973451327433626\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74f87eb053ff0695c656fa7c0cf393970e07f2b | 280,123 | ipynb | Jupyter Notebook | Regressions/AirBnB Price prediction/.ipynb_checkpoints/Apprentice_Challenge_2021_Answers-Copy1-checkpoint.ipynb | shobhit009/Machine_Learning_Projects | 8be7bbd6d918b91f440074ccf76bc464d3db91dd | [
"MIT"
] | null | null | null | Regressions/AirBnB Price prediction/.ipynb_checkpoints/Apprentice_Challenge_2021_Answers-Copy1-checkpoint.ipynb | shobhit009/Machine_Learning_Projects | 8be7bbd6d918b91f440074ccf76bc464d3db91dd | [
"MIT"
] | null | null | null | Regressions/AirBnB Price prediction/.ipynb_checkpoints/Apprentice_Challenge_2021_Answers-Copy1-checkpoint.ipynb | shobhit009/Machine_Learning_Projects | 8be7bbd6d918b91f440074ccf76bc464d3db91dd | [
"MIT"
] | null | null | null | 73.891585 | 70,676 | 0.670684 | [
[
[
"# Apprentice Challenge\n\nThis challenge is diagnostic of your current python pandas, matplotlib/seaborn, and numpy skills. These diagnostics will help inform your selection into the Machine Learning Guild's Apprentice program. Please ensure you are using Python 3 as the notebook won't work in 2.7\n\n## Challenge Background: AirBnB Price Prediction\n\n\n\n\n\nAirBnB is a popular technology platform that serves as an online marketplace for lodging. Using AirBnB, homeowners (called \"hosts\") can rent out their properties to travelers. Some hosts rent out their properties in full (e.g. an entire house or apartment), whereas some rent out individual rooms separately. Units are rented out for various durations, anywhere from one night up to a month or more, with some hosts specifying a minimum number of nights required for a rental.\n\nOver time, this platform has proven to be a powerful competitor to the traditional hotel and bed & breakfast industries, often competing on price, convenience, comfort, and/or the unique nature of its listed properties. \n\nThe company is constantly onboarding new rental hosts in NYC, and many of these hosts don’t have any idea how much customers would be willing to pay for their rental units. AirBnB has hired you, an analytics consultant, to use their historical NYC rental data and build a predictive model that their new hosts in the city can use to get a sense of what to charge.\n\nIn this data analysis programming challenge, you’ll have to clean the data, engineer some new modeling features, and finally, build and test the predictive model.\n\n\n## Instructions\n\nYou need to know your way around `pandas` DataFrames and basic Python programming. You have **90 minutes** to complete the challenge. We strongly discourage searching the internet for challenge answers.\n\nYour first task:\n* Read the first paragraph above to familiarize yourself with the topic.\n* Feel free to poke around with the iPython notebook.\n* When you are ready, proceed to the next task.\n* Complete each of the tasks listed below in the notebook.\n* You need to provide your code for challenge in the cells which say \"-- YOUR CODE FOR TASK NUMBER --\"\n\n**NOTE: After each Jupyter cell in which you will enter your code, there is an additional cell that will check your outputs. If your outputs are incorrect, this will be printed out for your awareness, and the correct outputs will be loaded in so that you can continue with the assessment. That being said, if you feel you are able to correct your code so that it generates the correct outputs, you should do so in order to get as many points as possible.**\n\n**Please reach out to [Lauren Moy](mailto:[email protected]) with any questions.**",
"_____no_output_____"
]
],
[
[
"# Import packages\nimport pandas as pd\nimport numpy as np\nimport data_load_files\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\nfrom sklearn.metrics import mean_squared_error,r2_score, mean_absolute_error\nfrom sklearn import preprocessing\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"# Task 1\n\n**Instructions**\n\nAirBnB just sent you the NYC rentals data as a text file (`AB_NYC_2019_pt1.csv`). First, we'll need to read that text file in as a pandas DataFrame called `df`. As it turns out, AirBnB also received an additional update (`AB_NYC_2019_pt2.csv`) overnight to add to the main dataset, so you'll have to read that in and append it to the main DataFrame as well.\n\nNext, to better understand the data, print the first 10 rows of the DataFrame, then print the data types of the DataFrame's columns\n\n\n**Expected Output**\n* (1pt) Read in the main data file as `df`, a pandas DataFrame and load in the second data file as `df2`\n* (1pt) Append the new data file to `df`\n* (1pt) Print the first 10 rows of the df and the datatypes of the df",
"_____no_output_____"
]
],
[
[
"# Task 1\n\n# -- YOUR CODE FOR TASK 1 --\n\n#Import primary AirBnB data file as a pandas DataFrame\n# df = ...\ndf = pd.read_csv('AB_NYC_2019_pt1.csv')\n\n\n#Import the additional AirBnB data file as a pandas DataFrame and append it to the primary data DataFrame\n# df2 = ...\ndf2 = pd.read_csv('AB_NYC_2019_pt2.csv')\n\n\n#Append df2 to df\n# df = ...\ndf = df.append(df2)\n\n\n\n#Print the first 10 rows of the df, and print the data types of the df's columns\n# Your code here\nprint(df.head(10))\nprint(df.dtypes)\n",
" name host_id host_name \\\n0 Clean & quiet apt home by the park 2787 John \n1 Skylit Midtown Castle 2845 Jennifer \n2 THE VILLAGE OF HARLEM....NEW YORK ! 4632 Elisabeth \n3 Cozy Entire Floor of Brownstone 4869 LisaRoxanne \n4 Entire Apt: Spacious Studio/Loft by central park 7192 Laura \n5 Large Cozy 1 BR Apartment In Midtown East 7322 Chris \n6 BlissArtsSpace! 7356 Garon \n7 Large Furnished Room Near B'way 8967 Shunichi \n8 Cozy Clean Guest Room - Family Apt 7490 MaryEllen \n9 Cute & Cozy Lower East Side 1 bdrm 7549 Ben \n\n neighbourhood_group neighbourhood latitude longitude \\\n0 Brooklyn Kensington 40.64749 -73.97237 \n1 Manhattan Midtown 40.75362 -73.98377 \n2 Manhattan Harlem 40.80902 -73.94190 \n3 Brooklyn Clinton Hill 40.68514 -73.95976 \n4 Manhattan East Harlem 40.79851 -73.94399 \n5 Manhattan Murray Hill 40.74767 -73.97500 \n6 Brooklyn Bedford-Stuyvesant 40.68688 -73.95596 \n7 Manhattan Hell's Kitchen 40.76489 -73.98493 \n8 Manhattan Upper West Side 40.80178 -73.96723 \n9 Manhattan Chinatown 40.71344 -73.99037 \n\n room_type price minimum_nights number_of_reviews last_review \\\n0 Private room 149 1 9 2018-10-19 \n1 Entire home/apt 225 1 45 2019-05-21 \n2 Private room 150 3 0 NaN \n3 Entire home/apt 89 1 270 2019-07-05 \n4 Entire home/apt 80 10 9 2018-11-19 \n5 Entire home/apt 200 3 74 2019-06-22 \n6 Private room 60 45 49 2017-10-05 \n7 Private room 79 2 430 2019-06-24 \n8 Private room 79 2 118 2017-07-21 \n9 Entire home/apt 150 1 160 2019-06-09 \n\n reviews_per_month calculated_host_listings_count availability_365 \n0 0.21 6 365 \n1 0.38 2 355 \n2 NaN 1 365 \n3 4.64 1 194 \n4 0.10 1 0 \n5 0.59 1 129 \n6 0.40 1 0 \n7 3.47 1 220 \n8 0.99 1 0 \n9 1.33 4 188 \nname object\nhost_id int64\nhost_name object\nneighbourhood_group object\nneighbourhood object\nlatitude float64\nlongitude float64\nroom_type object\nprice int64\nminimum_nights int64\nnumber_of_reviews int64\nlast_review object\nreviews_per_month float64\ncalculated_host_listings_count int64\navailability_365 int64\ndtype: object\n"
],
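[
"# Optional alternative (an assumption, not part of the original answer key): `DataFrame.append` is deprecated in\n# recent pandas releases and removed in pandas 2.x, where `pd.concat` produces the same combined DataFrame.\ndf_concat = pd.concat([pd.read_csv('AB_NYC_2019_pt1.csv'), pd.read_csv('AB_NYC_2019_pt2.csv')], ignore_index=True)\nprint(df_concat.shape)  # should match df.shape from the append-based answer above",
"_____no_output_____"
],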
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\ntask_1_check = data_load_files.TASK_1_OUTPUT\ntask_1_shape = task_1_check.shape\ntask_1_columns = task_1_check.columns\n\nif df.shape == task_1_shape and list(df.columns) == list(task_1_columns):\n print('df is correct')\nelse: \n df = task_1_check\n print(\"'`df' is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"# Task 2 Part 1\n**Instructions**\n\nAirBnB is aware that some of its listings are missing values. Let's see if we can determine how much of the dataset is affected. Start by printing out the number of rows in the df that contain any null (NaN) values.\n\nOnce you've done that, drop those rows from the df before any further analysis is conducted.\n\nOne of your fellow analytics consultants who was also exploring this data has been having trouble with their analysis. It seems to be due to a data type mismatch. In particular, they need the `last_review` column to be of type Datetime. Convert that column to Datetime for your teammate.\n\n\n**Expected Output**\n\n- (1pt) Correct number of rows that conain any null (NaN) values stored in a variable `num_nan`\n- (1pt) Updated DataFrame `df` where all rows that contain any NaNs have been dropped\n- (1pt) Updated DataFrame `df` where the dtype of column `last_review` is `datetime`\n",
"_____no_output_____"
]
],
[
[
"# Task 2 (Part 1)\n\n# Import packages\nimport datetime\n\n\n# -- YOUR CODE FOR TASK 2 (PART 1) --\n\n#Print out the number of rows in the df that contain any null (NaN) values\n# Your code here\nprint(df.isna().any(axis=1).sum())\n\n\n#Drop all rows with any NaNs from the DataFrame\n# Your code here\ndf.dropna(axis=0, inplace=True)\n\n\n#Convert the ‘last_review’ column to DateTime\n# Your code here\ndf['last_review'] = pd.to_datetime(df['last_review'])",
"10074\n"
],
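[
"# Optional diagnostic sketch (not part of the graded answer): per-column missing-value counts on the raw files\n# show which fields drive the dropped rows -- typically `last_review` and `reviews_per_month`, which are empty\n# for listings that have never been reviewed.\nraw = pd.concat([pd.read_csv('AB_NYC_2019_pt1.csv'), pd.read_csv('AB_NYC_2019_pt2.csv')], ignore_index=True)\nprint(raw.isna().sum().sort_values(ascending=False))",
"_____no_output_____"
],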
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\n## Checks\ntask_2_part_1_check = data_load_files.TASK_2_PART_1_OUTPUT\n\nshape_check = (df.shape == task_2_part_1_check.shape)\ncolumns_check = (list(df.columns) == list(task_2_part_1_check.columns))\ntype_check = (type(df['last_review']) == type(task_2_part_1_check['last_review']))\n\nif shape_check and columns_check and type_check:\n print('df is correct')\nelse: \n df = task_2_part_1_check\n print(\"'`df' is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"# Task 2 Part 2\n\n**Instructions**\n\nAirbnb team wants to further explore the expansion of their listings in the Neighbourhood Group of Brooklyn. Create a DataFrame `df_brooklyn` containing only these listings, and then, using that DataFrame, create a new DataFrame `df_brooklyn_prices_room_type` showing the mean price per room type, which will help them determine the room types that generate the highest revenue.\n\nAirBnB also wants to understand which neighbourhoods in the Brooklyn neighbourhood group are most common among its listings. Create a pandas Series `top_10_brooklyn_series` that contains the top 10 most common neighborhoods (as the index) in Brooklyn and the number of listings each represents.\n\nFinally, Airbnb has decided to launch an advertising campaign to promote private rooms in Brooklyn, and they have asked you for some additional information on the price range for these listings. Let's help them understand the breakdown of prices for private rooms in this neighborhood group. To do so, using the DataFrame `df_brooklyn_priv` (which has already been created for you), print out some summary statistics (specifically, the minimum, first quartile, median, third quartile, and maximum price values).\n\n**Expected Output**\n- (1pt) A DataFrame `df_brooklyn` that only contains listings in Brooklyn. Don't forget to reset the index if needed\n- (1pt) A Series `top_10_brooklyn_series` that contains the number of listings for only the top 10 most common neighborhoods in Brooklyn\n- (1pt) Create a dataframe `df_brooklyn_prices_room_type` showing the average (mean) prices of the listings for each room type in Brooklyn. This new DataFrame should contain only three columns: `neighbourhood_group`,`room_type`, and `price`. Don't forget to reset the index, if needed.",
"_____no_output_____"
]
],
[
[
"# Run this cell \npd.set_option('mode.chained_assignment', None)",
"_____no_output_____"
],
[
"#Create a pandas DataFrame containing only listings in the Brooklyn neighborhood group. Don't\n#forget to reset the index!\n#df_brooklyn = ...\n\ndf_brooklyn = df[(df['neighbourhood_group']=='Brooklyn')].reset_index(drop=True)\n\n#Printing Results\ndf_brooklyn\n\n# df_brooklyn.to_csv('data/TASK_2_PART_2_BKN_OUTPUT.csv', index = False)\n# df_brooklyn.shape",
"_____no_output_____"
],
[
"# Task 2 (Part 2)\n\n\n# -- YOUR CODE FOR TASK 2 (PART 2) --\n\n#Create a pandas Series showing the number of listings for each of the top 10 most common neighbourhoods\n#top_10_brooklyn_series = ...\ntop_10_brooklyn_series = df_brooklyn['neighbourhood'].value_counts()[0:10]\n\n\n#Printing Results\ntop_10_brooklyn_series\n",
"_____no_output_____"
],
[
"#Create a dataframe showing the average (mean) prices of the listings in Brooklyn\n#df_brooklyn_prices_room_type = ...\ndf_brooklyn_prices_room_type = df_brooklyn[['neighbourhood_group','room_type','price']].groupby(\\\n ['neighbourhood_group','room_type']).mean().reset_index()\n\n#Printing Results\ndf_brooklyn_prices_room_type\n",
"_____no_output_____"
],
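[
"# Hedged sketch: the Task 2 instructions mention a pre-built `df_brooklyn_priv` DataFrame of Brooklyn private-room\n# listings, but no such cell appears here, so this assumed construction builds it before printing the requested\n# minimum, first quartile, median, third quartile and maximum prices.\ndf_brooklyn_priv = df_brooklyn[df_brooklyn['room_type'] == 'Private room'].reset_index(drop=True)\nprint(df_brooklyn_priv['price'].quantile([0, 0.25, 0.5, 0.75, 1.0]))",
"_____no_output_____"
],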
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\n## Checks\ntask_2_part_2_top_10_check = data_load_files.TASK_2_PART_2_T10_OUTPUT\ntask_2_part_2_brooklyn_check = data_load_files.TASK_2_PART_2_BKN_OUTPUT\ntask_2_part_2_rm_prices_check = data_load_files.TASK_2_PART_2_RM_PRICES_OUTPUT\n\nprice_shape_check = (df_brooklyn_prices_room_type.shape == task_2_part_2_rm_prices_check.shape)\nprice_columns_check = (list(df_brooklyn_prices_room_type.columns) == list(task_2_part_2_rm_prices_check.columns))\nprice_avg_check = (df_brooklyn_prices_room_type.price.mean() == task_2_part_2_rm_prices_check.price.mean())\n\nbrooklyn_shape_check = (df_brooklyn.shape == task_2_part_2_brooklyn_check.shape)\n\nbrooklyn_top_10_avg_check = (top_10_brooklyn_series.mean() == task_2_part_2_top_10_check.mean())\n\nif price_shape_check and price_columns_check and price_avg_check and brooklyn_shape_check and brooklyn_top_10_avg_check:\n print('dfs are correct')\nelse: \n df_brooklyn_prices_room_type = task_2_part_2_rm_prices_check\n df_brooklyn = task_2_part_2_brooklyn_check\n top_10_brooklyn_series = task_2_part_2_top_10_check\n print(\"df's are incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"dfs are correct\n"
]
],
[
[
"# Task 3, part 1\n\n**Instructions**\n\nWe want to be able to model using the ‘neighbourhood’ column as a feature, but to do so we’ll have to transform it into a series of binary features (one per neighbourhood), and right now there are way too many unique values. To solve this problem, we will re-label all neighbourhoods not in the top 10 neighbourhoods as “Other”. First, you'll create a list of the top 10 most common neighbourhoods in Brooklyn, leveraging the `top_10_brooklyn_series` Series that you created earlier. Then you will replace all neighbourhood values NOT in that list with the value 'Other'.\n\nAirBnB believes that long lags between reviews can be an indicator that the rental unit is less desirable (not being booked often). To enable us to test this later, create a new column representing the number of days it has been since the last review was posted. \n\nAirBnB believes that ‘Entire home/apt’ rentals in Brooklyn can command a premium; hence, they would like you to separately identify such listings using a new binary column.\n\n**Expected Output**\n- (1pt) A list of neighborhoods `top_10_brooklyn_list` that contains the top 10 neighborhoods in brooklyn by largest count of Air BnBs\n- (1pt) A column `neighbourhood` that displays the neighbourhood name if it is in the `top_10_brooklyn_list`, otherwise displays \"Other\"\n- (1pt) Calculate the `days_since_review` and add as a column in `df_brooklyn`\n- (1pt) Create a binary column `brooklyn_whole` that create a binary indicator based on 'room_type'=='Entire home/apt'\n",
"_____no_output_____"
]
],
[
[
"#Task 3\n\n# -- YOUR CODE FOR TASK 3 --\n\n#Create a list of the top 10 most common neighbourhoods, using the 'top_10_brooklyn_series'\n#that you created earlier\n#top_10_brooklyn_list = ...\ntop_10_brooklyn_list = list(top_10_brooklyn_series.index.values)\n\n#Replace all 'neighbourhood' column values NOT in the top 10 with 'Other'\n#df_brooklyn['neighbourhood'] = ...\ndf_brooklyn['neighbourhood'] = np.where(df_brooklyn['neighbourhood'].isin(top_10_brooklyn_list),\\\n df_brooklyn['neighbourhood'], 'Other')\n\ndf_brooklyn['neighbourhood'].value_counts() #This isn't required, it just shows the results\n",
"_____no_output_____"
],
[
"#df['days_since_review']=...\ndf_brooklyn['days_since_review']=(df_brooklyn['last_review'] - pd.Timestamp('today')).dt.days\n\n# Print Results\ndf_brooklyn",
"_____no_output_____"
],
[
"#df['brooklyn_whole']=...\ndf_brooklyn['brooklyn_whole']=np.where((df_brooklyn['room_type']=='Entire home/apt'), True, False)\ndf_brooklyn",
"_____no_output_____"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\ntask_3_part_1_check = data_load_files.TASK_3_PART_1_OUTPUT\n\nbrooklyn_shape_check = (df_brooklyn.shape == task_3_part_1_check.shape)\nbrooklyn_columns_check = (list(df_brooklyn.columns) == list(task_3_part_1_check.columns))\n\nif brooklyn_shape_check and brooklyn_columns_check:\n print('df is correct')\nelse: \n df_brooklyn = task_3_part_1_check\n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"# Task 3, Part 2\n\nYou want to take a closer look at price in the dataset. You decide to categorize rental properties by their affordability. Categorize each listing into one of three price categories by binning the `price` column and creating a new `price_category` column.",
"_____no_output_____"
]
],
[
[
"price_bins = [0, 100, 200, np.inf]\nprice_cat = ['low', 'medium', 'high']\n\n#df['price_category'] = ...\ndf_brooklyn['price_category'] = pd.cut(df_brooklyn['price'], price_bins, labels=price_cat)\ndf_brooklyn\n",
"_____no_output_____"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\ntask_3_part_2_check = data_load_files.TASK_3_PART_2_OUTPUT\n\nbrooklyn_shape_check = (df_brooklyn.shape == task_3_part_2_check.shape)\nbrooklyn_columns_check = (list(df_brooklyn.columns) == list(task_3_part_2_check.columns))\n\nif brooklyn_shape_check and brooklyn_columns_check:\n print('df is correct')\nelse: \n df_brooklyn = task_3_part_2_check\n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"# Task 3, Part 3\n\n**Instructions**\n* Create a barchart of your dataset `Price Category` from Part 2, comparing the number of rentals in each category.\n\n\n**Expected Output**\n* barchart with listing count as bar\n* grouped by 3 price categories\n",
"_____no_output_____"
]
],
[
[
"pd.value_counts(df_brooklyn['price_category']).plot.bar()",
"_____no_output_____"
]
],
[
[
"# Task 3, Extra\n\nYou would like to see the above plot broken down by top 10 neighborhoods. Use Seaborn to create 10 bar graphs, one for each top 10 neighborhood, breaking down the listings in that neighborhood by price category and using hue to separate out the room types. Please use the seaborn plotting library. You can install seaborn using `pip`. You can read about the API for the catplot [here](https://seaborn.pydata.org/generated/seaborn.catplot.html#seaborn.catplot). ",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n\n# sns.catplot(#<<enter your code here>>#, col_wrap=3,height=3, legend = True)\nsns.catplot(x=\"price_category\", col=\"neighbourhood\", data=df_brooklyn, kind='count', hue='room_type', col_wrap=3,height=3, legend = True)",
"_____no_output_____"
]
],
[
[
"# Task 4 Part 1\n\n**Instructions **\n\nAirbnb's business team would like to understand the revenue the hosts make in Brookyln. As you do not have the Airbnb booking details, you can estimate the number of bookings for each property based on the number of reviews they received. You can then extrapolate each property’s revenue with this formula:\n\nNumber of Reviews x Price of Listing x Minimum Length of Stay\n\nThis will serve as a conservative estimate of the revenue, since it is likely that properties will have more bookings than reviews. In addition, guests are also likely to stay longer than the minimum number of nights required.\n\n**Expected Output**\n- (1pt) Write a function to calculate the host revenue using the above formula and return the updated dataframe `df_brooklyn` with a new column `estimated_host_revenue` using the function you created\n- (1pt) Descriptive Statistics of the `estimated_host_revenue`\n",
"_____no_output_____"
]
],
[
[
"# Write a function to calculate the estimated host revenue, update the dataframe with a new column `estimated_host_revenue` calculated using the above formula\n# and return the updated dataframe\n#Your code here\ndef generate_estimate_host_revenue(dataframe):\n dataframe['estimated_host_revenue'] = dataframe['price'] * dataframe['number_of_reviews'] * dataframe['minimum_nights']\n return dataframe",
"_____no_output_____"
],
[
"# Apply your function on `df_brooklyn`\n#Your code here\n#df_brooklyn = ...\ndf_brooklyn = generate_estimate_host_revenue(df_brooklyn)",
"_____no_output_____"
],
[
"df_brooklyn ",
"_____no_output_____"
],
[
"#Use the describe() function column `estimated_host_revenue` to generate descriptive statistics which includes \n# the summary of the central tendency, dispersion and shape of the numerical column.\n#Your code here\ndf_brooklyn['estimated_host_revenue'].describe()",
"_____no_output_____"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\n\ntask_4_part_1_check = data_load_files.TASK_4_PART_1_OUTPUT \n\nbrooklyn_shape_check = (df_brooklyn.shape == task_4_part_1_check.shape)\nbrooklyn_columns_check = (list(df_brooklyn.columns) == list(task_4_part_1_check.columns))\nbrooklyn_est_host_rev_mean_check = (df_brooklyn['estimated_host_revenue'].mean() == task_4_part_1_check['estimated_host_revenue'].mean())\nbrooklyn_est_host_rev_max_check = (df_brooklyn['estimated_host_revenue'].max() == task_4_part_1_check['estimated_host_revenue'].max())\n\nif brooklyn_shape_check and brooklyn_columns_check and brooklyn_est_host_rev_mean_check and brooklyn_est_host_rev_max_check:\n print('df is correct')\nelse: \n df_brooklyn = task_4_part_1_check\n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")\n\n",
"df is correct\n"
]
],
[
[
"# TASK 4, Part 2\n\n**Instructions**\n\nThe advertising campaign mentioned in Task 2 was successful as such the team wants more information on the average prices. Use a pivot table to look at the average prices for different room types within each neighbourhood. \n\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html\n\n**Expected Output**\n\n - (1pt) Correct Pivot Table with `room_type` as column vs `neighbourhood` as index\n - (1pt) Fill the cell with 0 if value is null\n",
"_____no_output_____"
]
],
[
[
"# YOUR CODE FOR TASK 4, PART 2\n#Your code here\n#Pivot = ...\nPivot = df_brooklyn.pivot_table(index=['neighbourhood'], values=['price'], columns=['room_type'], aggfunc=np.mean, fill_value = 0)\nPivot",
"_____no_output_____"
]
],
[
[
"# TASK 5, Part 1\n**Instructions**\n\nThe Airbnb analysts want to know the factors influencing the price. Before proceedeing with Correlation analysis, you need to perform some feature engineering tasks such as converting the categorical columns, dropping descriptive columns.\n\n1. Encode the categorical variable `room_type` and `neighbourhood` using One-Hot Encoding.\n\nMany machine learning algorithms cannot work with categorical data directly. The categories must be converted into numbers. One hot encoding creates new (binary) columns, indicating the presence of each possible value from the original data. \n\nUse pandas get_dummies function to create One-Hot Encoding. \n\nFunction syntax : new_dataframe = pd.get_dummies(dataframe name, columns = [list of categorical columns])\n\n2. Drop the the descriptive columns `name`, `host_id`, `host_name`, `neighbourhood_group`, `latitude` and `longitude` from the dataframe. \n\nExpected Output\n\n - (2pt) Dataframe `df_brooklyn_rt` contains 19 columns now.\n - (1pt) Drop descriptive columns `name`, `host_id`, `host_name`, `neighbourhood_group`, `latitude` and `longitude`from the dataframe\n\n",
"_____no_output_____"
]
],
[
[
"# YOUR CODE FOR TASK 5, PART 1\n# encode the columnns room_type and neighbourhood\n# df_brooklyn_rt = ...\ndf_brooklyn_rt = pd.get_dummies(df_brooklyn, columns=['room_type','neighbourhood'])\ndf_brooklyn_rt",
"_____no_output_____"
],
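[
"# Optional note (an assumption, not part of the original answer key): passing drop_first=True to get_dummies\n# removes one dummy column per categorical variable, which avoids perfect multicollinearity among the one-hot\n# columns (the \"dummy variable trap\") discussed later in Task 5, Part 2.\ndf_brooklyn_rt_alt = pd.get_dummies(df_brooklyn, columns=['room_type', 'neighbourhood'], drop_first=True)\nprint(df_brooklyn_rt.shape, df_brooklyn_rt_alt.shape)",
"_____no_output_____"
],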
[
"#drop the descriptive columns from the dataframe \n# df_brooklyn_rt = ...\ndf_brooklyn_rt = df_brooklyn_rt.drop(['name','host_id','host_name','neighbourhood_group','latitude','longitude'],axis=1)",
"_____no_output_____"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\ntask_5_part_1_check = data_load_files.TASK_5_PART_1_OUTPUT \n\nbrooklyn_rt_shape_check = (df_brooklyn_rt.shape == task_5_part_1_check.shape)\nbrooklyn_rt_columns_check = (list(df_brooklyn_rt.columns) == list(task_5_part_1_check.columns))\n\nif brooklyn_rt_shape_check and brooklyn_rt_columns_check:\n print('df is correct')\nelse: \n df_brooklyn_rt = task_5_part_1_check\n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"# TASK 5, Part 2\n**Instructions**\n\nWe will now study the correlation of the features in the dataset with `price`. Use Pandas dataframe.corr() to find the pairwise correlation of all columns in the dataframe. \n\nUse pandas corr() function to create correlation dataframe.\n\nFunction syntax : new_dataframe = Dataframe.corr()\n\nVisualize the correaltion dataframe using a seaborm heatmap. Heatmap is used to plot rectangular data as a color-encoded matrix.\nhttps://seaborn.pydata.org/generated/seaborn.heatmap.html\n\nExpected Output\n\n - (2pt) Visualize the Correlation matrix using a heatmap\n - (1pt) Correct labels for x and y axis",
"_____no_output_____"
]
],
[
[
"# YOUR CODE FOR TASK 5, PART 2\n# create a correlation matix\n# corr = ...\ncorr = df_brooklyn_rt.corr()\n\n# plot the heatmap\n# sns.heatmap(...)\nsns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns)\n",
"_____no_output_____"
]
],
[
[
"Multicollinearity occurs when your data includes multiple attributes that are correlated not just to your target variable, but also to each other. \n\n**Based on the correlation matrix, answer the following:\n\n1. Which columns would you drop to prevent multicollinearity? \nSample Answer: brooklyn_whole or number_of_reviews\n2. Which columns do you find are positively related to the price?\nSample Answer: reviews_per_month\n",
"_____no_output_____"
]
],
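[
[
"To help answer the two questions above, the sketch below shows one possible way to read the correlation matrix programmatically. It assumes the `corr` DataFrame from Task 5, Part 2 is still in memory; the 0.8 threshold is an illustrative choice, not a requirement of the assessment.",
"_____no_output_____"
]
],
[
[
"# Hedged sketch: assumes `corr` from Task 5, Part 2 is still defined\nthreshold = 0.8  # illustrative cut-off for 'highly correlated'\n\n# Feature pairs (excluding self-pairs) whose absolute correlation exceeds the threshold\nhigh_pairs = [(a, b, round(corr.loc[a, b], 2))\n              for i, a in enumerate(corr.columns)\n              for b in corr.columns[i + 1:]\n              if abs(corr.loc[a, b]) > threshold]\nprint('Highly correlated pairs:', high_pairs)\n\n# Correlations with price, sorted, to spot the columns most positively related to price\nprint(corr['price'].sort_values(ascending=False).head(10))",
"_____no_output_____"
]
],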
[
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\ntask_5_part_2_check = data_load_files.TASK_5_PART_2_OUTPUT\n\ncorr_shape_check = (corr.shape == task_5_part_2_check.shape)\n \nif corr_shape_check:\n print('df is correct')\nelse: \n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
],
[
[
"## TASK 6\n\nProperty Hosts are expected to set their own prices for their listings. Although Airbnb provide some general guidance, there are currently no services which help hosts price their properties using range of data points.\n\nAirbnb pricing is important to get right, particularly in big cities like New York where there is a lot of competition and even small differences in prices can make a big difference. It is also a difficult thing to do correctly — price too high and no one will book. Price too low and you’ll be missing out on a lot of potential income.\n\nNow, let’s try to make a price prediction model using the basic machine learning model from scikit learn. It is a linear regression model that we will use to predict the prices. Import the correct Library",
"_____no_output_____"
],
[
"### Task 6, Part 1\n\n**Instructions**\n\n**Preparing the data for training the model**\nBased on the correlation plot observations, we have now identified the features that influence the price of an accomodation. We will prepare the data to train the price prediction model. \n\nWe will create two dataframes 'X' (contains all features influencing the price) and 'Y' (contains the feature price) from `df_brooklyn_rt`. \n1. To create Y, select the `price` column from `df_brooklyn_rt`\n2. To create X, drop the columns `price`, `last_review`, `brooklyn_whole`,`price_category` from `df_brooklyn_rt`. We are dropping `brooklyn_whole` as it was causing multicollinearity with `room_type`.\n\n \n**Splitting the data into training and testing sets**\n\nNext, we split the X and Y datasets into training and testing sets. We train the model with 80% of the samples and test with the remaining 20%. We do this to assess the model’s performance on unseen data. To split the data we use train_test_split function provided by scikit-learn library. We finally print the sizes of our training and test set to verify if the splitting has occurred properly.\n\n**Note**: Please don't change the value for `random_state` in your code, it should set be 5.\n \n**Expected Output**\n- (1pt) Create dataframe Y from `df_brooklyn_rt`, select only column `price`\n- (1pt) Create dataframe X from `df_brooklyn_rt`, do not include columns `price`, `last_review`,` brooklyn_whole` and `price_category`\n- (1pt) Split the dataframes X and Y into train and test datasets using the train_test_split function\n\n",
"_____no_output_____"
]
],
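[
[
"The cells in this task use `train_test_split`, `LinearRegression`, `mean_squared_error` and `r2_score`. They are most likely already imported near the top of the notebook; the cell below is only a sketch of the imports this task assumes, shown here for completeness.",
"_____no_output_____"
]
],
[
[
"# Sketch of the scikit-learn imports assumed by Task 6 (may already be imported earlier in the notebook)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score",
"_____no_output_____"
]
],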
[
[
"#Your code here \n#X = \n#Y =",
"_____no_output_____"
],
[
"#Solution\nX = df_brooklyn_rt.drop(['price', 'last_review', 'brooklyn_whole','price_category'], axis = 1)\nY = df_brooklyn_rt['price']",
"_____no_output_____"
],
[
"#Your code here \n#Please don't change the test_size value it should remain 0.2\n#X_train, X_test, Y_train, Y_test = train_test_split(<....your X value here...>, <your Y value here>, test_size = 0.2, random_state=5)\n#print(X_train.shape)\n#print(X_test.shape)\n#print(Y_train.shape)\n#print(Y_test.shape)",
"_____no_output_____"
],
[
"#Solution\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=5)\nprint(X_train.shape)\nprint(X_test.shape)\nprint(Y_train.shape)\nprint(Y_test.shape)",
"(13151, 21)\n(3288, 21)\n(13151,)\n(3288,)\n"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\ntask_6_part_1_output_1_check = data_load_files.TASK_6_PART_1_OUTPUT_1\ntask_6_part_1_output_2_check = data_load_files.TASK_6_PART_1_OUTPUT_2\ntask_6_part_1_output_3_check = data_load_files.TASK_6_PART_1_OUTPUT_3\ntask_6_part_1_output_4_check = data_load_files.TASK_6_PART_1_OUTPUT_4\n\n#X_train\nxtrain_shape_check = (X_train.shape == task_6_part_1_output_1_check.shape)\nxtrain_columns_check = (list(X_train.columns) == list(task_6_part_1_output_1_check.columns))\n\n#X_test\nxtest_shape_check = (X_test.shape == task_6_part_1_output_2_check.shape)\nxtest_columns_check = (list(X_test.columns) == list(task_6_part_1_output_2_check.columns))\n\nif xtrain_shape_check and xtrain_columns_check and \\\n xtest_shape_check and xtest_columns_check:\n print('dfs are correct')\nelse: \n X_train = task_6_part_1_output_1_check\n X_test = task_6_part_1_output_2_check\n Y_train = task_6_part_1_output_3_check\n Y_test = task_6_part_1_output_4_check\n print(\"dfs are incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"dfs are correct\n"
]
],
[
[
"### Task 6 Part 2\n\n**Instructions**\n\nTraining the model\nWe use scikit-learn’s LinearRegression to train our model. Using the fit() method, we will pass the training datasets X_train and Y_train as arguments to the linear regression model. \n\nTesting the model\nThe model has learnt about the dataset. We will now use the trained model on the test dataset, X_test. Using the predict() method, we will pass the test dataset X_Test as an argument to the model.\n\nExpected Output\n- (1pt) Pass the training datasets X_train and Y_train to the fit method as arguments\n- (1pt) Pass the test dataset X_test to the predict method as argument",
"_____no_output_____"
]
],
[
[
"#Run this cell\nlin_model = LinearRegression()",
"_____no_output_____"
],
[
"#Training the model\n#Your code here\n#lin_model.fit(X_argument, Y_argument)\n\n#Solution\nlin_model.fit(X_train, Y_train)",
"_____no_output_____"
],
[
"#Testing the model\n#Your code here\n#y_test_predict = lin_model.predict(...X test Dataset...)\n\n\n#Solution\ny_test_predict = lin_model.predict(X_test)\n",
"_____no_output_____"
],
[
"#Run this cell\n#Model Evaluation\nrmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))\nr2 = r2_score(Y_test, y_test_predict)\n\nprint(\"The model performance for testing set\")\nprint(\"--------------------------------------\")\nprint('RMSE is {}'.format(round(rmse,3)))\nprint('R2 score is {}'.format(round(r2,3)))",
"The model performance for testing set\n--------------------------------------\nRMSE is 176.637\nR2 score is 0.073\n"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\ntask_6_part_2_check = data_load_files.TASK_6_PART_2_OUTPUT\nround(rmse,3)\nrmse_check = (round(rmse,3) == task_6_part_2_check['RMSE'][0])\nr2_check = (round(r2,3) == task_6_part_2_check['R2'][0])\nif rmse_check and r2_check:\n print('Model evaluation is correct')\nelse:\n print('Model evaluation is incorrect')\n \n\n\n",
"Model evaluation is correct\n"
]
],
[
[
"### Task 6 Part 3\n\n**Instructions**\n\nNow we will compare the actual output values for X_test with the predicted values using a bar chart.\n\n\n- 1(pt) Create a new dataframe <code>lr_pred_df</code> using the <code>Y_test</code> and <code>y_test_predict</code>\n\n- 1(pt) Use first 20 records from the dataframe <code>lr_pred_df</code> and plot a bar graph showing comparision of actual and predicted values set Y axis label as 'Price' and Plot title as 'Actual vs Predicted Price'",
"_____no_output_____"
]
],
[
[
"#Actual Vs Predicted for Linear Regression\n#Your code here\n#lr_pred_df =\n\n#Solution\nlr_pred_df = pd.DataFrame({\n 'actual_values': np.array(Y_test).flatten(),\n 'y_test_predict': y_test_predict.flatten()})",
"_____no_output_____"
],
[
"#Your code here\n#lr_pred_df.plot() \n\n\n#Solution\nlr_pred_df = lr_pred_df.head(20)\nplt = lr_pred_df.plot(kind='bar')\nplt",
"_____no_output_____"
],
[
"## RUN THIS CELL AS-IS TO CHECK IF YOUR OUTPUTS ARE CORRECT. IF THEY ARE NOT,\n## THE APPROPRIATE OBJECTS WILL BE LOADED IN TO ENSURE THAT YOU CAN CONTINUE\n## WITH THE ASSESSMENT.\ntask_6_part_3_check = data_load_files.TASK_6_PART_3_OUTPUT \n\nlr_pred_df_shape_check = (lr_pred_df.shape == task_6_part_3_check.shape)\nlr_pred_df_columns_check = (list(lr_pred_df.columns) == list(task_6_part_3_check.columns))\n\nif lr_pred_df_shape_check and lr_pred_df_columns_check:\n print('df is correct')\nelse: \n lr_pred_df = task_6_part_3_check\n print(\"df is incorrect. You can correct for points, but you will still be able to move on to the next task if not.\")",
"df is correct\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e74f9d98b7425b1187e3af184dc7916906f1d5d4 | 10,291 | ipynb | Jupyter Notebook | 5. Databases_SQL/1-1-Connecting-v4-py.ipynb | naquech/IBM_Watson_Studio | 9bc831b1448a60b8720b232e9a74d40665ef2cbf | [
"MIT"
] | null | null | null | 5. Databases_SQL/1-1-Connecting-v4-py.ipynb | naquech/IBM_Watson_Studio | 9bc831b1448a60b8720b232e9a74d40665ef2cbf | [
"MIT"
] | null | null | null | 5. Databases_SQL/1-1-Connecting-v4-py.ipynb | naquech/IBM_Watson_Studio | 9bc831b1448a60b8720b232e9a74d40665ef2cbf | [
"MIT"
] | null | null | null | 29.319088 | 412 | 0.571276 | [
[
[
"<a href=\"https://www.bigdatauniversity.com\"><img src = \"https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png\" width = 300, align = \"center\"></a>\n\n<h1 align=center><font size = 5>Lab: Connect to Db2 database on Cloud using Python</font></h1>",
"_____no_output_____"
],
[
"# Introduction\n\nThis notebook illustrates how to access a DB2 database on Cloud using Python by following the steps below:\n1. Import the `ibm_db` Python library\n1. Enter the database connection credentials\n1. Create the database connection\n1. Close the database connection\n\n\n\n__Note:__ Please follow the instructions given in the first Lab of this course to Create a database service instance of Db2 on Cloud and retrieve your database Service Credentials.\n\n## Import the `ibm_db` Python library\n\nThe `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.\n\n\nWe first import the ibm_db library into our Python Application\n\nExecute the following cell by clicking within it and then \npress `Shift` and `Enter` keys simultaneously\n",
"_____no_output_____"
]
],
[
[
"import ibm_db",
"_____no_output_____"
]
],
[
[
"When the command above completes, the `ibm_db` library is loaded in your notebook. \n\n\n## Identify the database connection credentials\n\nConnecting to dashDB or DB2 database requires the following information:\n* Driver Name\n* Database name \n* Host DNS name or IP address \n* Host port\n* Connection protocol\n* User ID (or username)\n* User Password\n\n\n\n__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course\n\nNow enter your database credentials below and execute the cell with `Shift` + `Enter`\n",
"_____no_output_____"
]
],
[
[
"#Replace the placeholder values with your actual Db2 hostname, username, and password:\ndsn_hostname = \"YourDb2Hostname\" # e.g.: \"dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net\"\ndsn_uid = \"YourDb2Username\" # e.g. \"abc12345\"\ndsn_pwd = \"YoueDb2Password\" # e.g. \"7dBZ3wWt9XN6$o0J\"\n\ndsn_driver = \"{IBM DB2 ODBC DRIVER}\"\ndsn_database = \"BLUDB\" # e.g. \"BLUDB\"\ndsn_port = \"50000\" # e.g. \"50000\" \ndsn_protocol = \"TCPIP\" # i.e. \"TCPIP\"",
"_____no_output_____"
],
[
"# @hidden_cell\ndsn_hostname = \"dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net\" \ndsn_uid = \"wvb91528\" \ndsn_pwd = \"tm^1nlbn4dj3j04b\" \n\ndsn_driver = \"DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=wvb91528;PWD=tm^1nlbn4dj3j04b;\"\ndsn_database = \"BLUDB\" \ndsn_port = \"50000\" \ndsn_protocol = \"TCPIP\" ",
"_____no_output_____"
]
],
[
[
"## Create the DB2 database connection\n\nIbm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.\n\n\nLets build the dsn connection string using the credentials you entered above\n",
"_____no_output_____"
]
],
[
[
"#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter\n#Create the dsn connection string\ndsn = (\n \"DRIVER={0};\"\n \"DATABASE={1};\"\n \"HOSTNAME={2};\"\n \"PORT={3};\"\n \"PROTOCOL={4};\"\n \"UID={5};\"\n \"PWD={6};\").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)\n\n#print the connection string to check correct values are specified\n#print(dsn)",
"DRIVER=DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=wvb91528;PWD=tm^1nlbn4dj3j04b;;DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=wvb91528;PWD=tm^1nlbn4dj3j04b;\n"
]
],
[
[
"Now establish the connection to the database",
"_____no_output_____"
]
],
[
[
"#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter\n#Create database connection\n\ntry:\n conn = ibm_db.connect(dsn, \"\", \"\")\n print (\"Connected to database: \", dsn_database, \"as user: \", dsn_uid, \"on host: \", dsn_hostname)\n\nexcept:\n print (\"Unable to connect: \", ibm_db.conn_errormsg() )\n",
"Connected to database: BLUDB as user: wvb91528 on host: dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net\n"
]
],
[
[
"Congratulations if you were able to connect successfuly. Otherwise check the error and try again.",
"_____no_output_____"
]
],
[
[
"#Retrieve Metadata for the Database Server\nserver = ibm_db.server_info(conn)\n\nprint (\"DBMS_NAME: \", server.DBMS_NAME)\nprint (\"DBMS_VER: \", server.DBMS_VER)\nprint (\"DB_NAME: \", server.DB_NAME)",
"DBMS_NAME: DB2/LINUXX8664\nDBMS_VER: 11.01.0303\nDB_NAME: BLUDB\n"
],
[
"#Retrieve Metadata for the Database Client / Driver\nclient = ibm_db.client_info(conn)\n\nprint (\"DRIVER_NAME: \", client.DRIVER_NAME) \nprint (\"DRIVER_VER: \", client.DRIVER_VER)\nprint (\"DATA_SOURCE_NAME: \", client.DATA_SOURCE_NAME)\nprint (\"DRIVER_ODBC_VER: \", client.DRIVER_ODBC_VER)\nprint (\"ODBC_VER: \", client.ODBC_VER)\nprint (\"ODBC_SQL_CONFORMANCE: \", client.ODBC_SQL_CONFORMANCE)\nprint (\"APPL_CODEPAGE: \", client.APPL_CODEPAGE)\nprint (\"CONN_CODEPAGE: \", client.CONN_CODEPAGE)",
"DRIVER_NAME: libdb2.a\nDRIVER_VER: 11.01.0404\nDATA_SOURCE_NAME: BLUDB\nDRIVER_ODBC_VER: 03.51\nODBC_VER: 03.01.0000\nODBC_SQL_CONFORMANCE: EXTENDED\nAPPL_CODEPAGE: 1208\nCONN_CODEPAGE: 1208\n"
]
],
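[
[
"Beyond retrieving metadata, the same connection can be used to issue SQL statements with `ibm_db.exec_immediate` and to iterate over result rows with `ibm_db.fetch_assoc`. The sketch below is only an illustration: the table name `INSTRUCTOR` is a hypothetical example that is not created anywhere in this notebook, so point the query at a table that actually exists in your database before running it.",
"_____no_output_____"
]
],
[
[
"#Hedged sketch: run a query over the open connection.\n#The table name INSTRUCTOR is hypothetical; replace it with a real table in your Db2 database.\nselectQuery = \"SELECT * FROM INSTRUCTOR\"\nstmt = ibm_db.exec_immediate(conn, selectQuery)\n\n#Fetch rows one at a time as dictionaries until none are left\nrow = ibm_db.fetch_assoc(stmt)\nwhile row is not False:\n    print(row)\n    row = ibm_db.fetch_assoc(stmt)",
"_____no_output_____"
]
],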
[
[
"## Close the Connection\nWe free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.",
"_____no_output_____"
]
],
[
[
"ibm_db.close(conn)",
"_____no_output_____"
]
],
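[
[
"As an optional pattern (not part of the original lab), wrapping the work in `try`/`finally` guarantees the connection is closed even if a statement in the middle raises an exception.",
"_____no_output_____"
]
],
[
[
"#Optional sketch: re-open the connection and guarantee it is closed even on errors\nconn = ibm_db.connect(dsn, \"\", \"\")\ntry:\n    server = ibm_db.server_info(conn)\n    print(\"Connected to:\", server.DBMS_NAME)\nfinally:\n    ibm_db.close(conn)",
"_____no_output_____"
]
],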
[
[
"## Summary\n\nIn this tutorial you established a connection to a DB2 database on Cloud database from a Python notebook using ibm_db API. ",
"_____no_output_____"
],
[
"Copyright © 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e74fa6d61d7fcc0438e3c6a71f9baba3e44300cf | 170,604 | ipynb | Jupyter Notebook | Pandas121.ipynb | mariuszkr33/dw_matrix | 2dbb2ebcdfe54e2049b56069c435617e3bfbf4c4 | [
"MIT"
] | null | null | null | Pandas121.ipynb | mariuszkr33/dw_matrix | 2dbb2ebcdfe54e2049b56069c435617e3bfbf4c4 | [
"MIT"
] | null | null | null | Pandas121.ipynb | mariuszkr33/dw_matrix | 2dbb2ebcdfe54e2049b56069c435617e3bfbf4c4 | [
"MIT"
] | null | null | null | 73.409639 | 34,606 | 0.65723 | [
[
[
"<a href=\"https://colab.research.google.com/github/mariuszkr33/dw_matrix/blob/master/Pandas121.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\nsns.set()\nnp.__version__\n",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"def fetch_financial_data(company='AMZN'):\n \n import pandas_datareader.data as web\n return web.DataReader(name=company, data_source='stooq')",
"_____no_output_____"
],
[
"google = fetch_financial_data(company='GOOGL')\ngoogle",
"_____no_output_____"
],
[
"google.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 1259 entries, 2020-06-26 to 2015-06-29\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Open 1259 non-null float64\n 1 High 1259 non-null float64\n 2 Low 1259 non-null float64\n 3 Close 1259 non-null float64\n 4 Volume 1259 non-null int64 \ndtypes: float64(4), int64(1)\nmemory usage: 59.0 KB\n"
],
[
"pd.set_option('precision',2)\ngoogle.describe()",
"_____no_output_____"
],
[
"pd.options.display.float_format = '{:.2f}'.format\ngoogle.describe()",
"_____no_output_____"
],
[
"google['Close'].plot()",
"_____no_output_____"
],
[
"google.head()",
"_____no_output_____"
],
[
"google = google.reset_index()\ngoogle",
"_____no_output_____"
],
[
"google['Month'] = google['Date'].dt.month\ngoogle['Year'] = google['Date'].dt.year\ngoogle",
"_____no_output_____"
],
[
" google.groupby('Year')['Close'].mean()\n ",
"_____no_output_____"
],
[
" google.groupby('Year')['Close'].mean().plot()",
"_____no_output_____"
],
[
"google.groupby('Month')['Close'].mean()",
"_____no_output_____"
],
[
"google.groupby('Month')['Close'].mean().plot()",
"_____no_output_____"
],
[
"google.groupby(['Month','Year'])['Close'].mean()",
"_____no_output_____"
],
[
"google.groupby(['Month','Year'])['Close'].mean().plot()",
"_____no_output_____"
],
[
"google['Close'].argmax()",
"_____no_output_____"
],
[
"google.iloc[[google['Close'].argmax()]]",
"_____no_output_____"
],
[
"google.iloc[[47]]",
"_____no_output_____"
],
[
"google.iloc[47]",
"_____no_output_____"
],
[
"google[['Date','Open','Close','Volume']]\n",
"_____no_output_____"
],
[
"google = google.set_index('Date')\ngoogle",
"_____no_output_____"
],
[
"google = google.drop(columns=['Year','Month'])\ngoogle",
"_____no_output_____"
],
[
"google.columns",
"_____no_output_____"
],
[
"google.columns = ['Otwarcie','Najwyższy','Najniższy','Zamknięcie','Wolumen']\ngoogle",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74faed5e81fe5b534490dde7d0ba527fa48636d | 21,107 | ipynb | Jupyter Notebook | tutorials/segmentation/spleen_segmentation_3d.ipynb | YipengHu/MPHY0043 | 80aee58801def8fd1176fbc1a1f1892a9a98e172 | [
"Apache-2.0"
] | null | null | null | tutorials/segmentation/spleen_segmentation_3d.ipynb | YipengHu/MPHY0043 | 80aee58801def8fd1176fbc1a1f1892a9a98e172 | [
"Apache-2.0"
] | null | null | null | tutorials/segmentation/spleen_segmentation_3d.ipynb | YipengHu/MPHY0043 | 80aee58801def8fd1176fbc1a1f1892a9a98e172 | [
"Apache-2.0"
] | 1 | 2021-12-15T12:22:24.000Z | 2021-12-15T12:22:24.000Z | 33.344392 | 136 | 0.530677 | [
[
[
"## Setup environment",
"_____no_output_____"
]
],
[
[
"from monai.utils import first, set_determinism\nfrom monai.transforms import (\n AsDiscrete,\n AsDiscreted,\n EnsureChannelFirstd,\n Compose,\n CropForegroundd,\n LoadImaged,\n Orientationd,\n RandCropByPosNegLabeld,\n ScaleIntensityRanged,\n Spacingd,\n EnsureTyped,\n EnsureType,\n Invertd,\n)\nfrom monai.handlers.utils import from_engine\nfrom monai.networks.nets import UNet\nfrom monai.networks.layers import Norm\nfrom monai.metrics import DiceMetric\nfrom monai.losses import DiceLoss\nfrom monai.inferers import sliding_window_inference\nfrom monai.data import CacheDataset, DataLoader, Dataset, decollate_batch\nfrom monai.config import print_config\nfrom monai.apps import download_and_extract\nimport torch\nimport matplotlib.pyplot as plt\nimport tempfile\nimport shutil\nimport os\nimport glob",
"_____no_output_____"
]
],
[
[
"## Setup imports",
"_____no_output_____"
]
],
[
[
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprint_config()",
"_____no_output_____"
]
],
[
[
"## Setup data directory\n\nYou can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \nThis allows you to save results and reuse downloads. \nIf not specified a temporary directory will be used.",
"_____no_output_____"
]
],
[
[
"directory = os.environ.get(\"MONAI_DATA_DIRECTORY\")\nroot_dir = tempfile.mkdtemp() if directory is None else directory\nprint(root_dir)",
"_____no_output_____"
]
],
[
[
"## Download dataset\n\nDownloads and extracts the dataset. \nThe dataset comes from http://medicaldecathlon.com/.",
"_____no_output_____"
]
],
[
[
"resource = \"https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar\"\nmd5 = \"410d4a301da4e5b2f6f86ec3ddba524e\"\n\ncompressed_file = os.path.join(root_dir, \"Task09_Spleen.tar\")\ndata_dir = os.path.join(root_dir, \"Task09_Spleen\")\nif not os.path.exists(data_dir):\n download_and_extract(resource, compressed_file, root_dir, md5)",
"_____no_output_____"
]
],
[
[
"## Set MSD Spleen dataset path",
"_____no_output_____"
]
],
[
[
"train_images = sorted(\n glob.glob(os.path.join(data_dir, \"imagesTr\", \"*.nii.gz\")))\ntrain_labels = sorted(\n glob.glob(os.path.join(data_dir, \"labelsTr\", \"*.nii.gz\")))\ndata_dicts = [\n {\"image\": image_name, \"label\": label_name}\n for image_name, label_name in zip(train_images, train_labels)\n]\ntrain_files, val_files = data_dicts[:-9], data_dicts[-9:]",
"_____no_output_____"
]
],
[
[
"## Set deterministic training for reproducibility",
"_____no_output_____"
]
],
[
[
"set_determinism(seed=0)",
"_____no_output_____"
]
],
[
[
"## Setup transforms for training and validation\n\nHere we use several transforms to augment the dataset:\n1. `LoadImaged` loads the spleen CT images and labels from NIfTI format files.\n1. `AddChanneld` as the original data doesn't have channel dim, add 1 dim to construct \"channel first\" shape.\n1. `Spacingd` adjusts the spacing by `pixdim=(1.5, 1.5, 2.)` based on the affine matrix.\n1. `Orientationd` unifies the data orientation based on the affine matrix.\n1. `ScaleIntensityRanged` extracts intensity range [-57, 164] and scales to [0, 1].\n1. `CropForegroundd` removes all zero borders to focus on the valid body area of the images and labels.\n1. `RandCropByPosNegLabeld` randomly crop patch samples from big image based on pos / neg ratio. \nThe image centers of negative samples must be in valid body area.\n1. `RandAffined` efficiently performs `rotate`, `scale`, `shear`, `translate`, etc. together based on PyTorch affine transform.\n1. `EnsureTyped` converts the numpy array to PyTorch Tensor for further steps.",
"_____no_output_____"
]
],
[
[
"train_transforms = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n Spacingd(keys=[\"image\", \"label\"], pixdim=(\n 1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(\n keys=[\"image\"], a_min=-57, a_max=164,\n b_min=0.0, b_max=1.0, clip=True,\n ),\n CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n RandCropByPosNegLabeld(\n keys=[\"image\", \"label\"],\n label_key=\"label\",\n spatial_size=(96, 96, 96),\n pos=1,\n neg=1,\n num_samples=4,\n image_key=\"image\",\n image_threshold=0,\n ),\n # user can also add other random transforms\n # RandAffined(\n # keys=['image', 'label'],\n # mode=('bilinear', 'nearest'),\n # prob=1.0, spatial_size=(96, 96, 96),\n # rotate_range=(0, 0, np.pi/15),\n # scale_range=(0.1, 0.1, 0.1)),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n)\nval_transforms = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n Spacingd(keys=[\"image\", \"label\"], pixdim=(\n 1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(\n keys=[\"image\"], a_min=-57, a_max=164,\n b_min=0.0, b_max=1.0, clip=True,\n ),\n CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n)",
"_____no_output_____"
]
],
[
[
"## Check transforms in DataLoader",
"_____no_output_____"
]
],
[
[
"check_ds = Dataset(data=val_files, transform=val_transforms)\ncheck_loader = DataLoader(check_ds, batch_size=1)\ncheck_data = first(check_loader)\nimage, label = (check_data[\"image\"][0][0], check_data[\"label\"][0][0])\nprint(f\"image shape: {image.shape}, label shape: {label.shape}\")\n# plot the slice [:, :, 80]\nplt.figure(\"check\", (12, 6))\nplt.subplot(1, 2, 1)\nplt.title(\"image\")\nplt.imshow(image[:, :, 80], cmap=\"gray\")\nplt.subplot(1, 2, 2)\nplt.title(\"label\")\nplt.imshow(label[:, :, 80])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Define CacheDataset and DataLoader for training and validation\n\nHere we use CacheDataset to accelerate training and validation process, it's 10x faster than the regular Dataset. \nTo achieve best performance, set `cache_rate=1.0` to cache all the data, if memory is not enough, set lower value. \nUsers can also set `cache_num` instead of `cache_rate`, will use the minimum value of the 2 settings. \nAnd set `num_workers` to enable multi-threads during caching. \nIf want to to try the regular Dataset, just change to use the commented code below.",
"_____no_output_____"
]
],
[
[
"train_ds = CacheDataset(\n data=train_files, transform=train_transforms,\n cache_rate=1.0, num_workers=4)\n# train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)\n\n# use batch_size=2 to load images and use RandCropByPosNegLabeld\n# to generate 2 x 4 images for network training\ntrain_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)\n\nval_ds = CacheDataset(\n data=val_files, transform=val_transforms, cache_rate=1.0, num_workers=4)\n# val_ds = Dataset(data=val_files, transform=val_transforms)\nval_loader = DataLoader(val_ds, batch_size=1, num_workers=4)",
"_____no_output_____"
]
],
[
[
"## Create Model, Loss, Optimizer",
"_____no_output_____"
]
],
[
[
"# standard PyTorch program style: create UNet, DiceLoss and Adam optimizer\ndevice = torch.device(\"cpu\")\nmodel = UNet(\n dimensions=3,\n in_channels=1,\n out_channels=2,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n norm=Norm.BATCH,\n).to(device)\nloss_function = DiceLoss(to_onehot_y=True, softmax=True)\noptimizer = torch.optim.Adam(model.parameters(), 1e-4)\ndice_metric = DiceMetric(include_background=False, reduction=\"mean\")",
"_____no_output_____"
]
],
[
[
"## Execute a typical PyTorch training process",
"_____no_output_____"
]
],
[
[
"max_epochs = 600\nval_interval = 2\nbest_metric = -1\nbest_metric_epoch = -1\nepoch_loss_values = []\nmetric_values = []\npost_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\npost_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n\nfor epoch in range(max_epochs):\n print(\"-\" * 10)\n print(f\"epoch {epoch + 1}/{max_epochs}\")\n model.train()\n epoch_loss = 0\n step = 0\n for batch_data in train_loader:\n step += 1\n inputs, labels = (\n batch_data[\"image\"].to(device),\n batch_data[\"label\"].to(device),\n )\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = loss_function(outputs, labels)\n loss.backward()\n optimizer.step()\n epoch_loss += loss.item()\n print(\n f\"{step}/{len(train_ds) // train_loader.batch_size}, \"\n f\"train_loss: {loss.item():.4f}\")\n epoch_loss /= step\n epoch_loss_values.append(epoch_loss)\n print(f\"epoch {epoch + 1} average loss: {epoch_loss:.4f}\")\n\n if (epoch + 1) % val_interval == 0:\n model.eval()\n with torch.no_grad():\n for val_data in val_loader:\n val_inputs, val_labels = (\n val_data[\"image\"].to(device),\n val_data[\"label\"].to(device),\n )\n roi_size = (160, 160, 160)\n sw_batch_size = 4\n val_outputs = sliding_window_inference(\n val_inputs, roi_size, sw_batch_size, model)\n val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]\n val_labels = [post_label(i) for i in decollate_batch(val_labels)]\n # compute metric for current iteration\n dice_metric(y_pred=val_outputs, y=val_labels)\n\n # aggregate the final mean dice result\n metric = dice_metric.aggregate().item()\n # reset the status for next validation round\n dice_metric.reset()\n\n metric_values.append(metric)\n if metric > best_metric:\n best_metric = metric\n best_metric_epoch = epoch + 1\n torch.save(model.state_dict(), os.path.join(\n root_dir, \"best_metric_model.pth\"))\n print(\"saved new best metric model\")\n print(\n f\"current epoch: {epoch + 1} current mean dice: {metric:.4f}\"\n f\"\\nbest mean dice: {best_metric:.4f} \"\n f\"at epoch: {best_metric_epoch}\"\n )",
"_____no_output_____"
],
[
"print(\n f\"train completed, best_metric: {best_metric:.4f} \"\n f\"at epoch: {best_metric_epoch}\")",
"_____no_output_____"
]
],
[
[
"## Plot the loss and metric",
"_____no_output_____"
]
],
[
[
"plt.figure(\"train\", (12, 6))\nplt.subplot(1, 2, 1)\nplt.title(\"Epoch Average Loss\")\nx = [i + 1 for i in range(len(epoch_loss_values))]\ny = epoch_loss_values\nplt.xlabel(\"epoch\")\nplt.plot(x, y)\nplt.subplot(1, 2, 2)\nplt.title(\"Val Mean Dice\")\nx = [val_interval * (i + 1) for i in range(len(metric_values))]\ny = metric_values\nplt.xlabel(\"epoch\")\nplt.plot(x, y)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Check best model output with the input image and label",
"_____no_output_____"
]
],
[
[
"model.load_state_dict(torch.load(\n os.path.join(root_dir, \"best_metric_model.pth\")))\nmodel.eval()\nwith torch.no_grad():\n for i, val_data in enumerate(val_loader):\n roi_size = (160, 160, 160)\n sw_batch_size = 4\n val_outputs = sliding_window_inference(\n val_data[\"image\"].to(device), roi_size, sw_batch_size, model\n )\n # plot the slice [:, :, 80]\n plt.figure(\"check\", (18, 6))\n plt.subplot(1, 3, 1)\n plt.title(f\"image {i}\")\n plt.imshow(val_data[\"image\"][0, 0, :, :, 80], cmap=\"gray\")\n plt.subplot(1, 3, 2)\n plt.title(f\"label {i}\")\n plt.imshow(val_data[\"label\"][0, 0, :, :, 80])\n plt.subplot(1, 3, 3)\n plt.title(f\"output {i}\")\n plt.imshow(torch.argmax(\n val_outputs, dim=1).detach().cpu()[0, :, :, 80])\n plt.show()\n if i == 2:\n break",
"_____no_output_____"
]
],
[
[
"## Evaluation on original image spacings",
"_____no_output_____"
]
],
[
[
"val_org_transforms = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n Spacingd(keys=[\"image\"], pixdim=(\n 1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(\n keys=[\"image\"], a_min=-57, a_max=164,\n b_min=0.0, b_max=1.0, clip=True,\n ),\n CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n)\n\nval_org_ds = Dataset(\n data=val_files, transform=val_org_transforms)\nval_org_loader = DataLoader(val_org_ds, batch_size=1, num_workers=4)\n\npost_transforms = Compose([\n EnsureTyped(keys=\"pred\"),\n Invertd(\n keys=\"pred\",\n transform=val_org_transforms,\n orig_keys=\"image\",\n meta_keys=\"pred_meta_dict\",\n orig_meta_keys=\"image_meta_dict\",\n meta_key_postfix=\"meta_dict\",\n nearest_interp=False,\n to_tensor=True,\n ),\n AsDiscreted(keys=\"pred\", argmax=True, to_onehot=True, n_classes=2),\n AsDiscreted(keys=\"label\", to_onehot=True, n_classes=2),\n])",
"_____no_output_____"
],
[
"model.load_state_dict(torch.load(\n os.path.join(root_dir, \"best_metric_model.pth\")))\nmodel.eval()\n\nwith torch.no_grad():\n for val_data in val_org_loader:\n val_inputs = val_data[\"image\"].to(device)\n roi_size = (160, 160, 160)\n sw_batch_size = 4\n val_data[\"pred\"] = sliding_window_inference(\n val_inputs, roi_size, sw_batch_size, model)\n val_data = [post_transforms(i) for i in decollate_batch(val_data)]\n val_outputs, val_labels = from_engine([\"pred\", \"label\"])(val_data)\n # compute metric for current iteration\n dice_metric(y_pred=val_outputs, y=val_labels)\n\n # aggregate the final mean dice result\n metric_org = dice_metric.aggregate().item()\n # reset the status for next validation round\n dice_metric.reset()\n\nprint(\"Metric on original image spacing: \", metric_org)",
"_____no_output_____"
]
],
[
[
"## Cleanup data directory\n\nRemove directory if a temporary was used.",
"_____no_output_____"
]
],
[
[
"if directory is None:\n shutil.rmtree(root_dir)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74fd212bddbc47dd74bf1c5de3410d447930010 | 6,518 | ipynb | Jupyter Notebook | FeatureCollection/set_properties.ipynb | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | 1 | 2020-05-31T14:19:59.000Z | 2020-05-31T14:19:59.000Z | FeatureCollection/set_properties.ipynb | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | null | null | null | FeatureCollection/set_properties.ipynb | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | null | null | null | 43.453333 | 1,031 | 0.591286 | [
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/set_properties.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/set_properties.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/set_properties.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).",
"_____no_output_____"
]
],
[
[
"# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as emap\nexcept:\n import geemap as emap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThe default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ",
"_____no_output_____"
]
],
[
[
"Map = emap.Map(center=[40,-100], zoom=4)\nMap.add_basemap('ROADMAP') # Add Google Map\nMap",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"# Add Earth Engine dataset\n# Make a feature and set some properties.\nfeature = ee.Feature(ee.Geometry.Point([-122.22599, 37.17605])) \\\n .set('genus', 'Sequoia').set('species', 'sempervirens')\n\n# Get a property from the feature.\nspecies = feature.get('species')\nprint(species.getInfo())\n\n# Set a new property.\nfeature = feature.set('presence', 1)\n\n# Overwrite the old properties with a new dictionary.\nnewDict = {'genus': 'Brachyramphus', 'species': 'marmoratus'}\nfeature = feature.set(newDict)\n\n# Check the result.\nprint(feature.getInfo())\n\n",
"_____no_output_____"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74ff34632d2541e5316808bf0394d8e650a5235 | 15,942 | ipynb | Jupyter Notebook | text_summarization_torch.ipynb | darrenkoh/Tesla-News-Classifier | 97270040096a3eac997a6f471b037dac86ef8225 | [
"MIT"
] | null | null | null | text_summarization_torch.ipynb | darrenkoh/Tesla-News-Classifier | 97270040096a3eac997a6f471b037dac86ef8225 | [
"MIT"
] | null | null | null | text_summarization_torch.ipynb | darrenkoh/Tesla-News-Classifier | 97270040096a3eac997a6f471b037dac86ef8225 | [
"MIT"
] | null | null | null | 40.564885 | 336 | 0.48827 | [
[
[
"import torch\nimport json\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'",
"_____no_output_____"
],
[
"# Bart Model\nfrom transformers import BartTokenizer, BartForConditionalGeneration, BartConfig\nmodel = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn').to(device)\ntokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')",
"_____no_output_____"
],
[
"df = pd.read_json('foxnews_news_cleaned.json')\n\n# Generate in small batches else running out of memory on GTX 1080Ti :(\nsummary = []\nwith torch.no_grad():\n for x in tqdm(range(0,df.shape[0]), ncols=100):\n batch = tokenizer(df.body.loc[x], truncation=True, padding='longest', return_tensors=\"pt\").to(device)\n translated = model.generate(**batch)\n summary.append(tokenizer.decode(translated, skip_special_tokens=True))\n #print(summary[-1])\ndf['summary'] = summary",
"100%|█████████████████████████████████████████████████████████████| 414/414 [36:15<00:00, 5.25s/it]\n"
],
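[
"# Hedged sketch (optional alternative, an assumption rather than the author's code):\n# summarize several articles per forward pass using batch_decode. The batch size of 4\n# is illustrative; pick the largest value that fits in GPU memory.\nbatched_summary = []\nbatch_size = 4\nwith torch.no_grad():\n    for start in range(0, df.shape[0], batch_size):\n        texts = df.body.iloc[start:start + batch_size].tolist()\n        batch = tokenizer(texts, truncation=True, padding='longest', return_tensors='pt').to(device)\n        generated = model.generate(**batch)\n        batched_summary.extend(tokenizer.batch_decode(generated, skip_special_tokens=True))",
"_____no_output_____"
],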
[
"df.summary.value_counts().nlargest(2)",
"_____no_output_____"
],
[
"# Save the cleaned data\nparsed = json.loads(df.to_json(orient='records', force_ascii=False, indent=4))\nwith open('foxnews_news_cleaned_summary.json', 'w', encoding='utf-8') as f:\n json.dump(parsed, f, indent=4)",
"_____no_output_____"
],
[
"df = pd.read_json('foxnews_news_cleaned_summary.json')\ndf",
"_____no_output_____"
],
[
"# Split to training/test set\ndf = pd.read_json('foxnews_news_cleaned_summary.json')\ndf = df.loc[df['sentiment'] != 'NA', ['title', 'summary', 'sentiment']]\nlabel = {'neutral':0,'positive':1,'negative':2}\ndf['labels'] = df['sentiment'].map(label)\n\n# Split the train/test set\ntrain, test = train_test_split(df[['title', 'summary', 'sentiment', 'labels']], test_size=0.2, random_state=12, shuffle=True)\ntrain.columns = ['title','summary','sentiment', 'labels']\ntest.columns = ['title','sumamry','sentiment', 'labels']\ntrain.to_csv('data/train.csv', sep=',', index=False)\ntest.to_csv('data/test.csv', sep=',', index=False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74ff8adece3cc677858c16107195ff5c5ccc1ef | 39,503 | ipynb | Jupyter Notebook | demos/python/demo_toy_images.ipynb | ferjorosa/test-glfm | b219c650d0429ea71b953743730ae53cc122a61b | [
"MIT"
] | 45 | 2017-04-07T03:43:25.000Z | 2021-10-18T20:01:51.000Z | demos/python/demo_toy_images.ipynb | ferjorosa/test-glfm | b219c650d0429ea71b953743730ae53cc122a61b | [
"MIT"
] | 4 | 2017-07-18T13:22:53.000Z | 2019-10-11T01:54:01.000Z | demos/python/demo_toy_images.ipynb | ferjorosa/test-glfm | b219c650d0429ea71b953743730ae53cc122a61b | [
"MIT"
] | 7 | 2017-07-04T01:53:41.000Z | 2020-02-18T07:18:38.000Z | 145.231618 | 18,964 | 0.85391 | [
[
[
"# DEMO_TOY_IMAGES",
"_____no_output_____"
],
[
"Simple illustration of GLFM pipeline, replicating the example of the IBP linear-Gaussian model in (Griffiths and Ghahramani, 2011).",
"_____no_output_____"
]
],
[
[
"# ---------------------------------------------\n# Import necessary libraries\n# ---------------------------------------------\nimport numpy as np # import numpy matrix for calculus with matrices\nimport matplotlib.pyplot as plt # import plotting library\nimport time # import time to be able to measure iteration speed\nimport sys\nsys.path.append('../../src/GLFMpython/')\nimport GLFM\n\nimport pdb",
"_____no_output_____"
],
[
"# ---------------------------------------------\n# 1. GENERATIVE MODEL\n# ---------------------------------------------\nprint '\\n 1. GENERATIVE MODEL\\n'\n\nprint '\\tGenerating feature images (components)...'\n# Btrue contains the features images or components in order to generate the\n# whole set of images\nBtrue = 2*np.array([[0,1.0,0,0,0,0, 1,1,1,0,0,0, 0,1,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0], [0,0.0,0,1,1,1, 0,0,0,1,0,1, 0,0,0,1,1,1, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0], [0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 1,0,0,0,0,0, 1,1,0,0,0,0, 1,1,1,0,0,0], [0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,1,1,1, 0,0,0,0,1,0, 0,0,0,0,1,0]])\nD = Btrue.shape[1] # number of dimensions\nK = Btrue.shape[0] # number of binary images\n\nprint '\\tPlotting feature images (Btrue)...'\nf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\nV = [ax1, ax2, ax3, ax4] # subfigures handlers\nfor i in xrange(len(Btrue)):\n pixels = Btrue[i].reshape(int(np.sqrt(Btrue.shape[1])),int(np.sqrt(Btrue.shape[1])))\n # Plot each feature component k\n V[i].imshow(pixels, cmap='gray',interpolation='nearest')\n V[i].set_ylim(0,5) # set Y-axis limits\n V[i].set_xlim(0,5) # set X-axis limits\n V[i].set_title('Image %d' % (i+1)) # set subplot title\n#plt.ion() # turn on interactive mode for plotting (so that the script continues)\nplt.show() # display images component\nplt.pause(0.0001)\n\nprint '\\tSetting model parameters (ground truth) and generate database...'\nN = 1000 # number of images to be generated\ns2x = 0.5 # noise variance for the observations\n\nprint '\\tGenerating data with N=%d and noise variance s2x=%.2f' % (N,s2x)\n# generate matrix Z\nZtrue = 1.0*(np.random.rand(N,K) < 0.2)#np.random.randint(0,2,size=(N,K)).astype('float64')\n# Next line generates the toy database\nX = np.sqrt(s2x) * np.random.randn(N,D) + np.inner(Ztrue, Btrue.transpose())",
"\n 1. GENERATIVE MODEL\n\n\tGenerating feature images (components)...\n\tPlotting feature images (Btrue)...\n"
],
[
"# ---------------------------------------------\n# 2. INITIALIZATION FOR GLFM ALGORITHM\n# ---------------------------------------------\n\nprint '\\n 2. INITIALIZATION\\n'\n\nprint '\\tInitializing Z...'\nhidden = dict()\nKinit = 2 # initial number of latent features\nhidden['Z'] = np.random.randint(0,2,size=(N,Kinit)).astype('float64')\n\nprint '\\tInitialization of variables needed for the GLFM model...'\ndata = dict()\ndata['X'] = X\ndata['C'] = np.tile('g',(1,X.shape[1]))[0].tostring() # datatype vector\n\n# params is optional\nparams = dict()\nparams['alpha'] = 2 # concentration parameter for the IBP\nparams['Niter'] = 100 # number of algorithm iterations\nparams['maxK'] = 10\nparams['verbose'] = 0 #do not show messages\n",
"\n 2. INITIALIZATION\n\n\tInitializing Z...\n\tInitialization of variables needed for the GLFM model...\n"
],
[
"# ---------------------------------------------\n# 3. RUN INFERENCE FOR GLFM ALGORITHM\n# ---------------------------------------------\nprint '\\tInfering latent features...\\n'\nhidden = GLFM.infer(data, hidden, params)",
"\tInfering latent features...\n\nIn C++: transforming input data...\ndone\n\n\nEntering C++: Running Inference Routine...\n\n\nBack to Python: OK\n\nB_out[D,Kest,maxR] where D=36, Kest=4, maxR=1\n"
],
[
"# ---------------------------------------------\n# 4. PROCESS RESULTS\n# ---------------------------------------------\nKest = hidden['B'].shape[1] # number of inferred latent features\nD = hidden['B'].shape[0] # number of dimensions\n\nprint '\\tPrint inferred latent features...'\nf, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(3, 3, sharex='col', sharey='row')\nV = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]\nfor k in xrange(hidden['B'].shape[1]):\n if k>len(V):\n break;\n\n # visualize each inferred dimension\n Zp = np.zeros(Kest)\n Zp[k] = 1.0\n\n #hidden['B'][:,k]\n #pixels = hidden['B'][:,k].reshape((int(np.sqrt(D)),int(np.sqrt(D))))\n Bpred = GLFM.computeMAP(data['C'],Zp, hidden) # MAP prediction for each dim d\n pixels = Bpred.reshape((int(np.sqrt(D)),int(np.sqrt(D))))\n # Plot\n V[k].imshow(pixels, cmap='gray',interpolation='none')\n V[k].set_ylim(0,5)\n V[k].set_xlim(0,5)\n V[k].set_title('Feature %d' % (k+1))\n#plt.ion() # interactive mode for plotting (script continues)\nplt.show() # display figure\nplt.pause(0.0001)\n",
"\tPrint inferred latent features...\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7500b69dcf5c054c2a2f667b2a0a8d6c0842de7 | 7,624 | ipynb | Jupyter Notebook | Homeworks/2_Linear_Independence_Check/2019.09.17 Linear Independence.ipynb | lev1khachatryan/ASDS_DSP | 9059d737f6934b81a740c79b33756f7ec9ededb3 | [
"MIT"
] | 1 | 2020-12-29T18:02:13.000Z | 2020-12-29T18:02:13.000Z | Homeworks/2_Linear_Independence_Check/2019.09.17 Linear Independence.ipynb | lev1khachatryan/ASDS_DSP | 9059d737f6934b81a740c79b33756f7ec9ededb3 | [
"MIT"
] | null | null | null | Homeworks/2_Linear_Independence_Check/2019.09.17 Linear Independence.ipynb | lev1khachatryan/ASDS_DSP | 9059d737f6934b81a740c79b33756f7ec9ededb3 | [
"MIT"
] | null | null | null | 30.866397 | 91 | 0.402938 | [
[
[
"import numpy as np\n# np.random.seed(42)",
"_____no_output_____"
],
[
"class rankMatrix(object):\n \"\"\"\n Calculate rank of matrix\n \"\"\"\n \n def __init__(self, Matrix): \n self.R = len(Matrix) \n self.C = len(Matrix[0]) \n \n # Function for exchanging two rows of a matrix \n def swap(self, Matrix, row1, row2, col): \n for i in range(col): \n temp = Matrix[row1][i] \n Matrix[row1][i] = Matrix[row2][i] \n Matrix[row2][i] = temp \n \n # Function to Display a matrix \n def Display(self, Matrix, row, col): \n for i in range(row): \n for j in range(col): \n print (\" \" + str(Matrix[i][j])) \n print ('\\n') \n \n # Find rank of a matrix \n def rankOfMatrix(self, Matrix): \n rank = self.C \n for row in range(0, rank, 1): \n \n # Before we visit current row \n # 'row', we make sure that \n # mat[row][0],....mat[row][row-1] \n # are 0. \n \n # Diagonal element is not zero \n if Matrix[row][row] != 0: \n for col in range(0, self.R, 1): \n if col != row: \n \n # This makes all entries of current \n # column as 0 except entry 'mat[row][row]' \n multiplier = (Matrix[col][row] /\n Matrix[row][row]) \n for i in range(rank): \n Matrix[col][i] -= (multiplier *\n Matrix[row][i]) \n \n # Diagonal element is already zero. \n # Two cases arise: \n # 1) If there is a row below it \n # with non-zero entry, then swap \n # this row with that row and process \n # that row \n # 2) If all elements in current \n # column below mat[r][row] are 0, \n # then remvoe this column by \n # swapping it with last column and \n # reducing number of columns by 1. \n else: \n reduce = True\n \n # Find the non-zero element \n # in current column \n for i in range(row + 1, self.R, 1): \n \n # Swap the row with non-zero \n # element with this row. \n if Matrix[i][row] != 0: \n self.swap(Matrix, row, i, rank) \n reduce = False\n break\n \n # If we did not find any row with \n # non-zero element in current \n # columnm, then all values in \n # this column are 0. \n if reduce: \n \n # Reduce number of columns \n rank -= 1\n \n # copy the last column here \n for i in range(0, self.R, 1): \n Matrix[i][row] = Matrix[i][rank] \n \n # process this row again \n row -= 1\n \n # self.Display(Matrix, self.R,self.C) \n return (rank)",
"_____no_output_____"
],
[
"def generate_vectors(vector_count, dim = 3, range_from = 1, range_to = 100):\n \"\"\"\n Generate random vectors in specified dimension\n \n Parameters: \n vector_count (int): count of vectors\n dim (int): dimension of vectors, by default it is 3\n range_from (int): start range for random interval\n range_to (int): end range for random interval\n Returns:\n list\n \"\"\"\n matrix = []\n for i in range(vector_count):\n matrix.append(np.random.randint(range_from, range_to, dim))\n \n# return np.column_stack(matrix)\n return matrix",
"_____no_output_____"
],
[
"def check_independence(matrix):\n \"\"\"\n Check vectors dependency/ independency\n \n Parameters:\n matrix(list) - list of vectors\n \n Returns:\n boolean - True if independent, False otherwise\n \"\"\"\n matrix = np.column_stack(matrix)\n RankMatrix = rankMatrix(matrix)\n rank = RankMatrix.rankOfMatrix(matrix)\n# rank = np.linalg.matrix_rank(matrix)\n vector_count = matrix.shape[1]\n if vector_count == rank:\n print('Linearly Independent')\n return True\n else:\n print('Linearly Dependent')\n return False",
"_____no_output_____"
],
[
"matrix = generate_vectors(vector_count = 3, dim = 4, range_from = 1, range_to = 3)\n# print(np.column_stack(matrix))\nprint(matrix)\ncheck_independence(matrix)",
"[array([2, 1, 2, 2]), array([1, 1, 2, 2]), array([2, 1, 2, 2])]\nLinearly Dependent\n"
]
],
[
[
" ",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7500ec61bab3320aabb0275546db70aee0a7095 | 61,194 | ipynb | Jupyter Notebook | assignments/assignment01/Codecademy.ipynb | edwardd1/phys202-2015-work | b91da6959223a82c4c0b8030c92a789234a4b6b9 | [
"MIT"
] | null | null | null | assignments/assignment01/Codecademy.ipynb | edwardd1/phys202-2015-work | b91da6959223a82c4c0b8030c92a789234a4b6b9 | [
"MIT"
] | null | null | null | assignments/assignment01/Codecademy.ipynb | edwardd1/phys202-2015-work | b91da6959223a82c4c0b8030c92a789234a4b6b9 | [
"MIT"
] | null | null | null | 577.301887 | 59,082 | 0.936219 | [
[
[
"# Codecademy Completion",
"_____no_output_____"
],
[
"This problem will be used for verifying that you have completed the Python course on http://www.codecademy.com/.\n\nHere are the steps to do this verification:\n\n1. Go to the page on http://www.codecademy.com/ that shows your percent completion.\n2. Take a screen shot of that page.\n3. Name the file `codecademy.png` and upload it to this folder.\n4. Run the following cells to display the image in this notebook.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image",
"_____no_output_____"
],
[
"Image(filename='codecademy.png', width='100%')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e75021637708e72c918ac428b30e8ba9e14c9c02 | 18,473 | ipynb | Jupyter Notebook | files/01-StartingWithData-day01.ipynb | UCSBCarpentry/2021-10-19-ucsb-python-online | fd6ac2e2f27a97412a75adc4ea1d750114405e00 | [
"CC-BY-4.0"
] | null | null | null | files/01-StartingWithData-day01.ipynb | UCSBCarpentry/2021-10-19-ucsb-python-online | fd6ac2e2f27a97412a75adc4ea1d750114405e00 | [
"CC-BY-4.0"
] | null | null | null | files/01-StartingWithData-day01.ipynb | UCSBCarpentry/2021-10-19-ucsb-python-online | fd6ac2e2f27a97412a75adc4ea1d750114405e00 | [
"CC-BY-4.0"
] | null | null | null | 27.367407 | 92 | 0.326693 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"# show the jupyter lab root path\n%pwd",
"_____no_output_____"
],
[
"pd.read_csv('data/surveys.csv')",
"_____no_output_____"
],
[
"surveys_df = pd.read_csv('data/surveys.csv')",
"_____no_output_____"
],
[
"surveys_df.head()",
"_____no_output_____"
],
[
"type(surveys_df)",
"_____no_output_____"
],
[
"# show an attribute of the a dataframe\nsurveys_df.dtypes",
"_____no_output_____"
],
[
"# apply a method/function to a dataframe\nsurveys_df.tail()",
"_____no_output_____"
],
[
"myShape = surveys_df.shape",
"_____no_output_____"
],
[
"type(myShape)",
"_____no_output_____"
],
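[
"# A small illustrative addition (not in the original lesson): the shape tuple can be\n# unpacked into named variables.\nn_rows, n_cols = myShape\nprint(n_rows, n_cols)",
"_____no_output_____"
],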
[
"type(surveys_df.dtypes)",
"_____no_output_____"
],
[
"surveys_df.columns\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e75022388e6cc2dda89c1556ba2c4147c5da33e3 | 15,625 | ipynb | Jupyter Notebook | retrain_detection_qat_tf1.ipynb | KeithAzzopardi1998/tutorials | e97b5f3faccc87ff797a60c55b6bba54e1251db4 | [
"Apache-2.0"
] | null | null | null | retrain_detection_qat_tf1.ipynb | KeithAzzopardi1998/tutorials | e97b5f3faccc87ff797a60c55b6bba54e1251db4 | [
"Apache-2.0"
] | null | null | null | retrain_detection_qat_tf1.ipynb | KeithAzzopardi1998/tutorials | e97b5f3faccc87ff797a60c55b6bba54e1251db4 | [
"Apache-2.0"
] | null | null | null | 28.460838 | 498 | 0.516416 | [
[
[
"##### *Copyright 2020 Google LLC*\n*Licensed under the Apache License, Version 2.0 (the \"License\")*",
"_____no_output_____"
]
],
[
[
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Retrain a detection model for Edge TPU with quant-aware training (TF 1.12)",
"_____no_output_____"
],
[
"This notebook uses a set of TensorFlow training scripts to perform transfer-learning on a quantization-aware object detection model and then convert it for compatibility with the [Edge TPU](https://coral.ai/products/).\n\nSpecifically, this tutorial shows you how to retrain a MobileNet V1 SSD model so that it detects two pets: Abyssinian cats and American Bulldogs (from the [Oxford-IIIT Pets Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)), using TensorFlow r1.12.\n\nBeware that, compared to a desktop computer, this training can take *a lot* longer in Colab because Colab provides limited resources for long-running operations. So you'll likely see faster training speeds if you [connect this notebook to a local runtime](https://research.google.com/colaboratory/local-runtimes.html), or instead follow the [tutorial to run this training in Docker](https://coral.ai/docs/edgetpu/retrain-detection/) (which includes more documentation about this process).",
"_____no_output_____"
],
[
"## Import TensorFlow",
"_____no_output_____"
]
],
[
[
"! pip uninstall tensorflow -y\n! pip install tensorflow==1.12",
"_____no_output_____"
],
[
"import tensorflow as tf\nprint(tf.__version__)",
"_____no_output_____"
]
],
[
[
"## Clone the model and training repos",
"_____no_output_____"
]
],
[
[
"! git clone https://github.com/tensorflow/models.git",
"_____no_output_____"
],
[
"! cd models && git checkout f788046ca876a8820e05b0b48c1fc2e16b0955bc",
"_____no_output_____"
],
[
"! git clone https://github.com/google-coral/tutorials.git\n\n! cp -r tutorials/docker/object_detection/scripts/* models/research/",
"_____no_output_____"
]
],
[
[
"## Import dependencies",
"_____no_output_____"
],
[
"For details, see https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md",
"_____no_output_____"
]
],
[
[
"! apt-get install -y python python-tk\n! pip install Cython contextlib2 pillow lxml jupyter matplotlib",
"_____no_output_____"
],
[
"# Get protoc 3.0.0, rather than the old version already in the container\n! wget https://www.github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip\n! unzip protoc-3.0.0-linux-x86_64.zip -d proto3\n! mkdir -p local/bin && mkdir -p local/include\n! mv proto3/bin/* local/bin\n! mv proto3/include/* local/include\n! rm -rf proto3 protoc-3.0.0-linux-x86_64.zip",
"_____no_output_____"
],
[
"# Install pycocoapi\n! git clone --depth 1 https://github.com/cocodataset/cocoapi.git\n! (cd cocoapi/PythonAPI && make -j8)\n! cp -r cocoapi/PythonAPI/pycocotools/ models/research/\n! rm -rf cocoapi",
"_____no_output_____"
],
[
"# Run protoc on the object detection repo (generate .py files from .proto)\n% cd models/research/\n! ../../local/bin/protoc object_detection/protos/*.proto --python_out=.",
"_____no_output_____"
],
[
"import os\nos.environ['PYTHONPATH'] += \":/content/models/research:/content/models/research/slim\"",
"_____no_output_____"
]
],
[
[
"Just to verify everything is correctly set up:",
"_____no_output_____"
]
],
[
[
"! python object_detection/builders/model_builder_test.py",
"_____no_output_____"
]
],
[
[
"## Convert training data to TFRecord",
"_____no_output_____"
],
[
"To train with different images, read [how to configure your own training data](https://coral.ai/docs/edgetpu/retrain-detection/#configure-your-own-training-data).",
"_____no_output_____"
]
],
[
[
"! ./prepare_checkpoint_and_dataset.sh --network_type mobilenet_v1_ssd --train_whole_model false",
"_____no_output_____"
]
],
[
[
"## Perform transfer-learning",
"_____no_output_____"
],
[
"The following script takes several hours to finish in Colab. (You can shorten by reducing the steps, but that reduces the final accuracy.)\n\nIf you didn't already select \"Run all\" then you should run all remaining cells now. That will ensure the rest of the notebook completes while you are away, avoiding the chance that the Colab runtime times-out and you lose the training data before you download the model.",
"_____no_output_____"
]
],
[
[
"%env NUM_TRAINING_STEPS=500\n%env NUM_EVAL_STEPS=100\n\n# If you're retraining the whole model, we suggest thes values:\n# %env NUM_TRAINING_STEPS=50000\n# %env NUM_EVAL_STEPS=2000",
"_____no_output_____"
],
[
"! ./retrain_detection_model.sh --num_training_steps $NUM_TRAINING_STEPS --num_eval_steps $NUM_EVAL_STEPS",
"_____no_output_____"
]
],
[
[
"As training progresses, you can see new checkpoint files appear in the `models/research/learn_pet/train/` directory.",
"_____no_output_____"
],
[
"## Compile for the Edge TPU",
"_____no_output_____"
]
],
[
[
"! ./convert_checkpoint_to_edgetpu_tflite.sh --checkpoint_num $NUM_TRAINING_STEPS",
"_____no_output_____"
],
[
"! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\n\n! echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list\n\n! sudo apt-get update\n\n! sudo apt-get install edgetpu-compiler\t",
"_____no_output_____"
],
[
"%cd learn_pet/models/\n\n! ls",
"_____no_output_____"
],
[
"! edgetpu_compiler output_tflite_graph.tflite",
"_____no_output_____"
]
],
[
[
"Download the files:",
"_____no_output_____"
]
],
[
[
"from google.colab import files\n\nfiles.download('output_tflite_graph_edgetpu.tflite')\nfiles.download('labels.txt')",
"_____no_output_____"
]
],
[
[
"If you get a \"Failed to fetch\" error here, it's probably because the files weren't done saving. So just wait a moment and try again.\n\nAlso look out for a browser popup that might need approval to download the files.",
"_____no_output_____"
],
[
"## Run the model on the Edge TPU\n\n\n",
"_____no_output_____"
],
[
"You can now run the model on your Coral device with acceleration on the Edge TPU.\n\nTo get started, try using [this code for object detection with the TensorFlow Lite API](https://github.com/google-coral/tflite/tree/master/python/examples/detection). Just follow the instructions on that page to set up your device, copy the `output_tflite_graph_edgetpu.tflite` and `labels.txt` files to your Coral Dev Board or device with a Coral Accelerator, and pass it a photo to see the detected objects.\n\nCheck out more examples for running inference at [coral.ai/examples](https://coral.ai/examples/#code-examples/).",
"_____no_output_____"
],
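[
"# A hedged sketch, not part of the original tutorial: loading the compiled model with\n# the TensorFlow Lite runtime and an Edge TPU delegate on a Coral device. The file\n# names and the dummy input below are assumptions for illustration only.\nimport numpy as np\nfrom tflite_runtime.interpreter import Interpreter, load_delegate\n\ninterpreter = Interpreter(model_path='output_tflite_graph_edgetpu.tflite',\n                          experimental_delegates=[load_delegate('libedgetpu.so.1')])\ninterpreter.allocate_tensors()\ninp = interpreter.get_input_details()[0]\n# Feed one dummy image of the expected shape, then read the first detection output.\ndummy = np.zeros(inp['shape'], dtype=inp['dtype'])\ninterpreter.set_tensor(inp['index'], dummy)\ninterpreter.invoke()\nboxes = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])\nprint(boxes.shape)",
"_____no_output_____"
],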
[
"## Implementation details\n",
"_____no_output_____"
],
[
"\nAll the scripts used in this notebook come from the following locations:<br>\n+ https://github.com/google-coral/tutorials/tree/master/docker/object_detection/scripts\n+ https://github.com/tensorflow/models/tree/r1.13.0/research/object_detection/\n\nMore explanation of the steps in this tutorial is available at\nhttps://coral.ai/docs/edgetpu/retrain-detection/.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7504335a103f6b18e265b9ec6e28fce753bd5e0 | 409,009 | ipynb | Jupyter Notebook | data/electric_device/cleaning_fridge.ipynb | ngoclp2000/Introduction_To_Data_Science | 1712deeb6dca2b880ccf66646d4daa9fc52b9524 | [
"MIT"
] | null | null | null | data/electric_device/cleaning_fridge.ipynb | ngoclp2000/Introduction_To_Data_Science | 1712deeb6dca2b880ccf66646d4daa9fc52b9524 | [
"MIT"
] | null | null | null | data/electric_device/cleaning_fridge.ipynb | ngoclp2000/Introduction_To_Data_Science | 1712deeb6dca2b880ccf66646d4daa9fc52b9524 | [
"MIT"
] | null | null | null | 35.774425 | 181 | 0.296761 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"data = pd.read_csv(\"fridge.csv\")\ndata",
"_____no_output_____"
],
[
"data['number_of_comment'].isin([0]).sum()",
"_____no_output_____"
],
[
"review_predict = data['comment']",
"_____no_output_____"
],
[
"sentiment = pd.read_csv('text.csv',index_col=[0])\nsentiment",
"_____no_output_____"
],
[
"with open('vietnamese_stopwords.txt', encoding=\"utf8\") as f:\n stopwords = []\n for line in f:\n stopwords.append(\"_\".join(line.strip().split()))",
"_____no_output_____"
],
[
"import re\nfrom pyvi import ViTokenizer\ndef preprocessor(text):\n corpus = []\n for i in range(0, len(text)):\n review = re.sub(r\"http\\S+\", \"\", str(text[i]))\n review = re.sub(r\"#\\S+\", \"\", review)\n review = re.sub(r\"@\\S+\", \"\", review)\n review = re.sub('[_]',' ',review)\n review = re.sub('[^a-zA-Z_áàạảãăắằặẵẳâấầẩậẫđíỉìịĩóòỏọõôốồổộỗơớờởợỡéèẹẽẻêếềểệễúùủũụưứừửựữýỳỷỹỵÁÀẢÃẠĂẮẰẲẲẶẴÂẤẦẬẪẨĐÍÌỈỊĨÓÒỎỌÕÔỐỒỔỘỖƠỚỜỞỢỠÉÈẺẸẼÊẾỀỆỂỄÚÙỦŨỤƯỨỪỬỰỮÝỲỶỴỸ]',\n ' ',review)\n review = ViTokenizer.tokenize(review)\n review = review.lower()\n review = review.split()\n review = [word for word in review if not word in set(stopwords)]\n review = ' '.join(review)\n corpus.append(review)\n return corpus",
"_____no_output_____"
],
[
"X = sentiment['text'].values\ncorpus = preprocessor(X)",
"_____no_output_____"
],
[
"from sklearn.feature_extraction.text import TfidfVectorizer\ntfidf_vect = TfidfVectorizer(analyzer='word', max_features=30000)\ntfidf_vect.fit(corpus) \nX_data_tfidf = tfidf_vect.transform(corpus)\n\nfrom sklearn.model_selection import train_test_split\nX = X_data_tfidf\ny = sentiment['text_lb'].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=102)\n\nfrom sklearn.linear_model import LogisticRegression\nclf = LogisticRegression(random_state=0)\nclf.fit(X_train, y_train)\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\npredictions = clf.predict(X_test)\nprint('accuracy:',accuracy_score(y_test,predictions))",
"accuracy: 0.8759615384615385\n"
],
[
"from sklearn import svm\nclassifier = svm.SVC(probability=True)\nclassifier.fit(X_train, y_train)\ntrain_predictions = classifier.predict(X_train)\npredictions = classifier.predict(X_test)\nprint('accuracy:',accuracy_score(y_test,predictions))\nprint('confusion matrix:\\n',confusion_matrix(y_test,predictions))\nprint('classification report:\\n',classification_report(y_test,predictions))",
"accuracy: 0.885576923076923\nconfusion matrix:\n [[ 84 100]\n [ 19 837]]\nclassification report:\n precision recall f1-score support\n\n -1.0 0.82 0.46 0.59 184\n 1.0 0.89 0.98 0.93 856\n\n accuracy 0.89 1040\n macro avg 0.85 0.72 0.76 1040\nweighted avg 0.88 0.89 0.87 1040\n\n"
],
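[
"# Illustrative only (an addition, not from the original workflow): score a single new\n# comment with the fitted vectorizer and SVM; the sample text is an assumption.\nsample = preprocessor(['San pham rat tot, giao hang nhanh'])\nprint(classifier.predict_proba(tfidf_vect.transform(sample)))",
"_____no_output_____"
],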
[
"X_1 = review_predict.values\ncorpus1 = preprocessor(X_1)\nX_data_tfidf_1 = tfidf_vect.transform(corpus1)",
"_____no_output_____"
],
[
"proba = classifier.predict_proba(X_data_tfidf_1)",
"_____no_output_____"
],
[
"df1 = pd.DataFrame(data=proba, columns=['Negative', 'Positive'])",
"_____no_output_____"
],
[
"data = pd.concat([data, df1], axis = 1).drop(columns = ['comment'])\ndata",
"_____no_output_____"
],
[
"group_data = data.groupby(['index', 'name', 'price', 'number_of_comment', 'size', 'brand', 'volume(l)', 'number_of_door'], as_index=False, dropna=False).mean()\n#data.groupby(['index', 'name', 'price', 'number_of_comment'], as_index=False).mean()\ngroup_data",
"_____no_output_____"
],
[
"rank_data = group_data.drop(columns=['index', 'name', 'brand', 'size'])\nrank_data.values",
"_____no_output_____"
],
[
"pd.set_option('display.max_rows', rank_data.shape[0]+1)\nrank_data[lambda col :rank_data.columns] = rank_data[lambda col :rank_data.columns].replace(r'\\D+', np.NaN, regex=True)\nrank_data",
"_____no_output_____"
],
[
"from sklearn.impute import KNNImputer\ndata_imputed = rank_data.values\nix = [i for i in range(data_imputed.shape[1]) if i != 6]\nX = data_imputed[:, ix]\nimputer = KNNImputer()\nimputer.fit(X)\nXtrans = imputer.transform(X)\nX.shape",
"_____no_output_____"
],
[
"rank_data = pd.DataFrame(Xtrans,index=rank_data.index, columns=rank_data.columns)",
"_____no_output_____"
],
[
"rank_data",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n\nStandard_scaler = StandardScaler()\nXtrans = Standard_scaler.fit_transform(Xtrans)",
"_____no_output_____"
],
[
"rank_data_scaled = pd.DataFrame(Xtrans,index=rank_data.index, columns=rank_data.columns)",
"_____no_output_____"
],
[
"from skcriteria import Data, MIN, MAX\nranking = Data(\n rank_data_scaled,\n [MIN, MAX, MAX, MAX, MIN, MAX],\n cnames = rank_data.columns,\n weights = [1,10,1,1,1,1]\n)",
"_____no_output_____"
],
[
"from skcriteria.madm import simple\ndm = simple.WeightedSum(mnorm = \"sum\",wnorm = \"sum\")\ndec = dm.decide(ranking)",
"_____no_output_____"
],
[
"rank_data['rank'] = dec.rank_\nrank_data.sort_values(by=['rank'])",
"_____no_output_____"
],
[
"cols_to_keep = ['index', 'name', 'brand']\ngroup_data = group_data.drop(group_data.columns.difference(cols_to_keep), axis=1)",
"_____no_output_____"
],
[
"pd.concat([group_data, rank_data], axis=1, join=\"inner\").to_csv('fridge_done.csv')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7504ae37c6f7881707085c038d931441a028bfa | 1,299 | ipynb | Jupyter Notebook | python/chapter-2/exercises/EX2-1.ipynb | covuworie/in-all-likelihood | 6638bec8bb4dde7271adb5941d1c66e7fbe12526 | [
"MIT"
] | null | null | null | python/chapter-2/exercises/EX2-1.ipynb | covuworie/in-all-likelihood | 6638bec8bb4dde7271adb5941d1c66e7fbe12526 | [
"MIT"
] | 4 | 2020-03-24T17:53:04.000Z | 2021-08-23T20:16:17.000Z | python/chapter-2/exercises/EX2-1.ipynb | covuworie/in-all-likelihood | 6638bec8bb4dde7271adb5941d1c66e7fbe12526 | [
"MIT"
] | 1 | 2021-11-21T10:24:59.000Z | 2021-11-21T10:24:59.000Z | 18.826087 | 50 | 0.5204 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import nbinom",
"_____no_output_____"
],
[
"n = 213\nx = n - 20\ntheta = np.linspace(0.01, 1.0, num=100)",
"_____no_output_____"
],
[
"likelihood = nbinom.pmf(x, n, theta)",
"_____no_output_____"
],
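[
"# Added check (not in the original exercise): the likelihood is proportional to\n# theta**n * (1 - theta)**x, so it should peak near theta = n / (n + x).\ntheta_hat = n / (n + x)\nprint(theta_hat)",
"_____no_output_____"
],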
[
"plt.plot(theta, likelihood)\nplt.title('Negative binomial likelihood')\nplt.xlabel(r'$\\theta$')\nplt.ylabel('Likelihood');",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7504b8d0ff1b35893215fffdaebadf14b7d211c | 61,292 | ipynb | Jupyter Notebook | 1_4_EDA.ipynb | aditya-mengani/network_analysis_cbase_p1_ml | fd06d54053f5cd2ca26f813d63cfe3eafd984e53 | [
"MIT"
] | null | null | null | 1_4_EDA.ipynb | aditya-mengani/network_analysis_cbase_p1_ml | fd06d54053f5cd2ca26f813d63cfe3eafd984e53 | [
"MIT"
] | null | null | null | 1_4_EDA.ipynb | aditya-mengani/network_analysis_cbase_p1_ml | fd06d54053f5cd2ca26f813d63cfe3eafd984e53 | [
"MIT"
] | null | null | null | 65.065817 | 12,172 | 0.607975 | [
[
[
"# Initial Processing & EDA\n\nBy: Aditya Mengani, Ognjen Sosa, Sanjay Elangovan, Song Park, Sophia Skowronski",
"_____no_output_____"
]
],
[
[
"'''Importing basic data analysis packages'''\nimport numpy as np\nimport pandas as pd\nimport csv\nimport warnings\nimport os\nimport time\nimport math\nwarnings.filterwarnings('ignore')\n\n'''Plotting packages'''\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nsns.set(font_scale=1.3)",
"_____no_output_____"
]
],
[
[
"### Function: memory reduction of dataframe",
"_____no_output_____"
]
],
[
[
"def reduce_mem_usage(df, verbose=True):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2 \n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64) \n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64) \n end_mem = df.memory_usage().sum() / 1024**2\n if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df",
"_____no_output_____"
]
],
[
[
"# Load Data\n\nUse `tar -xvzf 20200908_bulk_export.tar.gz` to unzip Crunchbase export (for Windows)\n\nCheck out summary of data from Crunchbase export <a href='https://data.crunchbase.com/docs/daily-csv-export'>here</a>.",
"_____no_output_____"
]
],
[
[
"###########################\n# Pledge 1% Company UUIDs #\n###########################\nprint('='*100)\n\np1 = pd.read_csv('~/Desktop/w207/Project/Data/p1.csv')\nprint('PLEDGE 1%/p1 cols: {}\\nSHAPE: {}'.format(p1.columns.to_list(), p1.shape))\np1 = reduce_mem_usage(p1)\n\n#################\n# Organizations #\n#################\nprint('='*100)\n\norg = pd.read_csv('~/Desktop/w207/Project/Data/organizations.csv')\nprint('ORGANIZATION/org cols: {}\\nSHAPE: {}'.format(org.columns.to_list(), org.shape))\norg = reduce_mem_usage(org)\n\n#org_dscrp = pd.read_csv('files/csv/organization_descriptions.csv')\n#print('\\nORGANIZATION DESCRIPTION/org_dscrp cols: {}\\nSHAPE: {}'.format(org_dscrp.columns.to_list(), org_dscrp.shape))\n#org_dscrp = reduce_mem_usage(org_dscrp)\n\n##########\n# People #\n##########\n#print('='*100)\n\n#ppl = pd.read_csv('files/csv/people.csv')\n#print('PEOPLE/ppl cols: {}\\nSHAPE: {}'.format(ppl.columns.to_list(), ppl.shape))\n#ppl = reduce_mem_usage(ppl)\n\n#ppl_dscrp = pd.read_csv('files/csv/people_descriptions.csv')\n#print('\\nPEOPLE DESCRIPTION/ppl_dscrp cols: {}\\nSHAPE: {}'.format(ppl_dscrp.columns.to_list(), ppl_dscrp.shape))\n#ppl_dscrp = reduce_mem_usage(ppl_dscrp)\n\n#############\n# Financial #\n#############\nprint('='*100)\n\nfund_rnds = pd.read_csv('~/Desktop/w207/Project/Data/funding_rounds.csv')\nprint('FUNDING ROUNDS/fund_rnds cols: {}\\nSHAPE: {}'.format(fund_rnds.columns.to_list(), fund_rnds.shape))\nfund_rnds = reduce_mem_usage(fund_rnds)\n\ninvest = pd.read_csv('~/Desktop/w207/Project/Data/investments.csv')\nprint('\\nINVESTMENTS/invest cols: {}\\nSHAPE: {}'.format(invest.columns.to_list(), invest.shape))\ninvest = reduce_mem_usage(invest)\n\ninvest_prtnr = pd.read_csv('~/Desktop/w207/Project/Data/investment_partners.csv')\nprint('\\nPARTNER INVESTMENTS/invest_prtnr cols: {}\\nSHAPE: {}'.format(invest_prtnr.columns.to_list(), invest_prtnr.shape))\ninvest_prtnr = reduce_mem_usage(invest_prtnr)\n\n########\n# Jobs #\n########\nprint('='*100)\n\njobs = pd.read_csv('~/Desktop/w207/Project/Data/jobs.csv')\nprint('JOBS/jobs cols: {}\\nSHAPE: {}'.format(jobs.columns.to_list(), jobs.shape))\njobs = reduce_mem_usage(jobs)\nprint('='*100)",
"====================================================================================================\nPLEDGE 1%/p1 cols: ['uuid', 'p1_tag', 'p1_date']\nSHAPE: (7822, 3)\nMem. usage decreased to 0.13 Mb (0.0% reduction)\n====================================================================================================\nORGANIZATION/org cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'legal_name', 'roles', 'domain', 'homepage_url', 'country_code', 'state_code', 'region', 'city', 'address', 'postal_code', 'status', 'short_description', 'category_list', 'category_groups_list', 'num_funding_rounds', 'total_funding_usd', 'total_funding', 'total_funding_currency_code', 'founded_on', 'last_funding_on', 'closed_on', 'employee_count', 'email', 'phone', 'facebook_url', 'linkedin_url', 'twitter_url', 'logo_url', 'alias1', 'alias2', 'alias3', 'primary_role', 'num_exits']\nSHAPE: (1131310, 41)\nMem. usage decreased to 327.99 Mb (7.3% reduction)\n====================================================================================================\nFUNDING ROUNDS/fund_rnds cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'country_code', 'state_code', 'region', 'city', 'investment_type', 'announced_on', 'raised_amount_usd', 'raised_amount', 'raised_amount_currency_code', 'post_money_valuation_usd', 'post_money_valuation', 'post_money_valuation_currency_code', 'investor_count', 'org_uuid', 'org_name', 'lead_investor_uuids']\nSHAPE: (342220, 24)\nMem. usage decreased to 54.18 Mb (13.5% reduction)\n\nINVESTMENTS/invest cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'funding_round_uuid', 'funding_round_name', 'investor_uuid', 'investor_name', 'investor_type', 'is_lead_investor']\nSHAPE: (517635, 14)\nMem. usage decreased to 55.29 Mb (0.0% reduction)\n\nPARTNER INVESTMENTS/invest_prtnr cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'funding_round_uuid', 'funding_round_name', 'investor_uuid', 'investor_name', 'partner_uuid', 'partner_name']\nSHAPE: (89924, 14)\nMem. usage decreased to 9.61 Mb (0.0% reduction)\n====================================================================================================\nJOBS/jobs cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'person_uuid', 'person_name', 'org_uuid', 'org_name', 'started_on', 'ended_on', 'is_current', 'title', 'job_type']\nSHAPE: (1536367, 17)\nMem. usage decreased to 189.01 Mb (0.0% reduction)\n====================================================================================================\n"
],
[
"#########\n#Degrees#\n#########\ndegrees = pd.read_csv('~/Desktop/w207/Project/Data/degrees.csv')\nprint('DEGREES/degrees cols: {}\\nSHAPE: {}'.format(degrees.columns.to_list(), degrees.shape))\ndegrees = reduce_mem_usage(degrees)",
"DEGREES/degrees cols: ['uuid', 'name', 'type', 'permalink', 'cb_url', 'rank', 'created_at', 'updated_at', 'person_uuid', 'person_name', 'institution_uuid', 'institution_name', 'degree_type', 'subject', 'started_on', 'completed_on', 'is_completed']\nSHAPE: (365783, 17)\nMem. usage decreased to 45.00 Mb (0.0% reduction)\n"
],
[
"# Update dataframe columns\norg = org[['uuid', 'name', 'type', 'rank', 'roles', 'country_code', 'region', 'status', 'domain',\n 'category_groups_list', 'total_funding_usd', 'founded_on', 'closed_on',\n 'employee_count', 'primary_role']]\n\nfund_rnds['lead_investor_count']=fund_rnds['lead_investor_uuids'].str.split(',').apply(lambda x: float(len(x)) if ~np.any(pd.isnull(x)) else 0)\nfund_rnds = fund_rnds[['uuid', 'investment_type', 'announced_on', 'raised_amount_usd', \n 'post_money_valuation_usd', 'investor_count','lead_investor_uuids',\n 'lead_investor_count', 'org_uuid', 'org_name']]\n\ninvest = invest[['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', 'investor_type', \n 'is_lead_investor']]\n\ninvest_prtnr = invest_prtnr[['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', \n 'partner_uuid', 'partner_name']]\n\njobs = jobs[['uuid', 'person_uuid', 'person_name', 'org_uuid', 'org_name', 'started_on', 'ended_on', 'is_current', 'title', 'job_type']]\n\ndegrees = degrees[['uuid','person_uuid','person_name','institution_uuid','institution_name','degree_type','subject','started_on','completed_on','is_completed']]",
"_____no_output_____"
],
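[
"# Illustrative sanity check (an addition, not in the original analysis): how many\n# Pledge 1% uuids are present in the organizations table before merging.\nprint(p1['uuid'].isin(org['uuid']).sum(), 'of', len(p1))",
"_____no_output_____"
],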
[
"# Merge p1 and org dataframes on the organization uuid\ndf = pd.merge(org.copy(),p1.copy(),how='outer',on='uuid')\n\n# Convert Boolean to binary\ndf['p1_tag'] = df['p1_tag'].apply(lambda x: 1 if x == True else 0)\np1['p1_tag'] = 1\n\n# Convert employee_count 'unknown' to np.nan to get accurate missing value count\ndf['employee_count'] = df['employee_count'].apply(lambda x: np.NaN if x == 'unknown' else x)\n\n##############\n# Timestamps #\n##############\n\n# Convert to datetime objects\ndf['p1_date'] = pd.to_datetime(df['p1_date'])\np1['p1_date'] = pd.to_datetime(p1['p1_date'])\n\n# Get OutOfBoundsDatetime error if do not coerce for CB native timestamp columns\ndf['founded_on'] = pd.to_datetime(df['founded_on'], errors='coerce')\n\n# Reduce storage for numerical features\ndf = reduce_mem_usage(df, verbose=False)\n\n# Create new pledge1 dataframe that sorts by chronological order that the company took the pledge\npledge1 = df[df['p1_tag'] == 1].sort_values('p1_date')",
"_____no_output_____"
]
],
[
[
"# Explore Degree Data",
"_____no_output_____"
]
],
[
[
"df2 = pd.merge(df.copy(),degrees.copy(),how='outer',on='uuid')\npledge1_2 = df2[df2['p1_tag'] == 1].sort_values('p1_date')",
"_____no_output_____"
],
[
"pledge1_2.head(10)",
"_____no_output_____"
],
[
"# Exclude rows that have NaN institution_uuid\npledge1_2_degrees = pledge1_2[~pledge1_2['institution_name'].isna()]\ndf2_degrees = df2[~df2['institution_name'].isna()]\n\n# Create count column to sum over\ndf2_degrees['count'] = 1\n\n# Groupby \npledge1_2_degrees = pledge1_2_degrees.groupby(['institution_name'])['p1_tag'].sum().sort_values(ascending=False).reset_index()\ndf2_degrees = df2_degrees.groupby(['institution_name'])['count'].sum().sort_values(ascending=False).reset_index()",
"_____no_output_____"
],
[
"df2_degrees.head(10)",
"_____no_output_____"
],
[
"pledge1_2_degrees.head(10)",
"_____no_output_____"
]
],
[
[
"Can't create plots with degree data because degree data is missing for all P1 companies...:(",
"_____no_output_____"
]
],
[
[
"# Barplots\n_, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 12), sharey=True)\n#sns.barplot(x='p1_tag', y='institution_name', data=pledge1_2_degrees, orient='h', ax=ax[0])\nsns.barplot(x='count', y='institution_name', data=df2_degrees, orient='h', ax=ax[1])\n\n# Labels\nax[0].set_title('Pledge Companies by Employee Count\\n')\nax[0].xaxis.set_ticks_position('top')\nax[0].set_xlabel('Count')\nax[0].set_ylabel('Employee Count')\nax[1].set_title('Crunchbase Companies by Employee Count\\n')\nax[1].xaxis.set_ticks_position('top')\nax[1].set_xlabel('Count')\nax[1].set_ylabel('')\n\n# Plot\nplt.show()\n\ndel df2_degrees, pledge1_2_degrees",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7504bc97584c364ac29dfc868e64d19820b9150 | 23,544 | ipynb | Jupyter Notebook | Wu, You-Final Project-vF.ipynb | fionacandicewu/cogs18-finalproject-chatbot | 54039dd80d93b3d9ed102607f2547a5e8bb501fb | [
"MIT"
] | null | null | null | Wu, You-Final Project-vF.ipynb | fionacandicewu/cogs18-finalproject-chatbot | 54039dd80d93b3d9ed102607f2547a5e8bb501fb | [
"MIT"
] | null | null | null | Wu, You-Final Project-vF.ipynb | fionacandicewu/cogs18-finalproject-chatbot | 54039dd80d93b3d9ed102607f2547a5e8bb501fb | [
"MIT"
] | null | null | null | 40.246154 | 362 | 0.490316 | [
[
[
"Project Description\n\nThis is a chatbot that provides live daily news headlines from the Wall Street Journal, with main three categories of news : political, deals, and economy. First, it gives you a brief view about titles after you ask for certain type of news, then it can provide more details in a specific news based on your further preference. Feel free to check this out!",
"_____no_output_____"
],
[
"This project is also available on Github under the account name fionacandicewu. You may view it here:\nhttps://github.com/fionacandicewu/cogs18-finalproject-chatbot",
"_____no_output_____"
]
],
[
[
"import string\nimport random\nimport nltk\n\nimport requests\nimport json\n\nnews_request = requests.get(url='https://newsapi.org/v2/top-headlines?sources=the-wall-street-journal&apiKey=df21f07e419c41feb602fb9ba2a8456c')\nnews_dict = news_request.json()['articles']",
"_____no_output_____"
],
[
"def is_question(input_string):\n \"\"\"Check if the input is a question.\n \n Parameters\n ----------\n input_string : string\n String that may contain '?'.\n \n Returns\n -------\n output_string : boolean\n Boolean that asserts whether the input contains '?'.\n \"\"\"\n \n if \"?\" in input_string:\n output = True\n else:\n output = False\n return output ",
"_____no_output_____"
],
[
"def remove_punctuation(input_string):\n \"\"\"Remove the punctuations in input string.\n \n Parameters\n ----------\n input_string : string\n String to remove punctuations.\n \n Returns\n -------\n output_string : string\n String without punctuations.\n \"\"\"\n \n out_string =''\n for char in input_string:\n if not char in string.punctuation:\n out_string = out_string + char\n \n return out_string",
"_____no_output_____"
],
[
"def prepare_text(input_string):\n \"\"\"Convert all the inputs to lower case string without any punctuations. \n \n Parameters\n ----------\n input_string : string\n String that will be reorganized.\n \n Returns\n -------\n output_list : list\n List that contains all the lower case splited words of the input.\n \"\"\"\n out_list=[]\n \n # Convert strings to lower case letters\n temp_string = input_string.lower()\n temp_string = remove_punctuation(temp_string)\n \n # Split out the words from the string and list them as items in a list\n out_list = temp_string.split()\n return out_list",
"_____no_output_____"
],
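[
"# Quick illustrative check (an addition, not from the original project): prepare_text\n# lower-cases the input, strips punctuation, and splits it into words.\nprint(prepare_text('Hello, how is the Market today?'))",
"_____no_output_____"
],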
[
"def respond_echo(input_string, number_of_echoes, spacer):\n \"\"\" Repeat input several times.\n \n Parameters\n ----------\n input_string : string\n String that to be repeated by certain nymber of times.\n \n number_of_echoes : integer\n Integer that determines how many times the input will be repeated.\n \n spacer : string \n String to seperate input between the repetition.\n \n Returns\n -------\n echo_output : string\n String to repeat the input by the number of echos with a spacer as separator.\n \"\"\"\n \n if not input_string == None:\n echo_output = (input_string+spacer)*number_of_echoes\n else:\n echo_output = None\n\n return echo_output",
"_____no_output_____"
],
[
"def selector(input_list, check_list, return_list):\n \"\"\" Repeat input several times.\n \n Parameters\n ----------\n input_list : list\n List that contains a list of input.\n \n check_list : list\n List that checks whether input contains certain items.\n \n return_list : list\n List contains items that will be drawn randomly.\n \n Returns\n -------\n output : string\n String to display the result of a random choice in a list given certain conditions met.\n \"\"\"\n \n \n output = None\n for item in input_list:\n if item in check_list:\n output = random.choice(return_list)\n break\n return output ",
"_____no_output_____"
],
[
"def string_concatenator(string1, string2, separator):\n \"\"\" Concatenate strings with separators.\n \n Parameters\n ----------\n string1 : string\n String to be connected with other inputs. \n \n string2 : string\n String to be connected with other inputs.\n \n separator : string\n String to separate various inputs.\n \n Returns\n -------\n output : string\n String to display the result a series of connected inputs. \n \"\"\"\n output = string1+separator+string2\n return output",
"_____no_output_____"
],
[
"def list_to_string(input_list, separator):\n \"\"\" Concatenate items in a list and conver them to a string with separators.\n \n Parameters\n ----------\n input_list : list\n List containing items to be connected.\n \n separator : string\n String to separate various inputs.\n \n Returns\n -------\n output : string\n String to display the result a series of connected item in the input list. \n \"\"\"\n \n output = input_list[0]\n for item in input_list[1:]:\n output=string_concatenator(output, item, separator)\n return output ",
"_____no_output_____"
],
[
"def end_chat(input_list):\n \"\"\" End chat \n \n Parameters\n ----------\n input_list : list\n List containing 'quit' to end chat.\n \n Returns\n -------\n True or False : boolean\n Boolean assures whether to end chat based on whether the input contains 'quit'.\n \"\"\"\n \n if 'quit' in input_list:\n return True\n else: \n return False",
"_____no_output_____"
],
[
"assert callable(end_chat)\nassert isinstance(end_chat(['lalalala', 'have a great day!']), bool)\nassert end_chat(['nope']) == False",
"_____no_output_____"
],
[
"def is_in_list(list_one, list_two):\n \"\"\"Check if any element of list_one is in list_two.\n \n Parameters\n ----------\n list_one : list\n List containing a set of items.\n \n list_two : list\n List containing a set of items that may be in list_one.\n \n Returns\n -------\n True or False : boolean\n Return result of whether the element in list one is in list two.\n \"\"\"\n \n for element in list_one:\n if element in list_two:\n return True\n return False\n\ndef find_in_list(list_one, list_two):\n \"\"\"Find and return an element from list_one that is in list_two, or None otherwise.\n \n Parameters\n ----------\n list_one : list\n List containing a set of items.\n \n list_two : list\n List containing a set of items that may be in list_one.\n \n Returns\n -------\n element : string\n Return result of the element that are both in list one and list two.\n \"\"\"\n \n for element in list_one:\n if element in list_two:\n return element\n return None",
"_____no_output_____"
],
[
"GREETINGS_IN = ['morning', 'hello', 'hi', 'hey', 'hola', 'welcome', 'bonjour', 'greetings']\nGREETINGS_OUT = ['Good Morning!', 'What can I do for you?', 'How can I help you today?', 'Want some fresh market news?', 'Nice Meeting You!', \"Hello, it's nice to talk to you!\", 'Nice to meet you!']\n\nMARKETS_GREETINGS_IN = [\"how's the market today\", \"what's the market like today?\",\"market\", 'markets' ]\nMARKETS_GREETINGS_OUT = ['Political? or Deal? or Economy?']\n\ntype_news = ['political', 'deal', 'economy']\nWHICH_ARTICLE = \"Which % article would you like to read? (Enter the article number)\"\nNO_NEWS = \"Sorry, there are no % headlines today!\"\n\npolitical_news_list = ['trump', 'jinping', 'trade', 'congress', 'sanction', 'china', 'us', 'british', 'europe', 'france', 'asia', 'africa', 'middle east', 'iran', 'iraq', 'korea', 'white house', 'law']\ndeal_news_list = ['merger', 'acquisition', 'investment', 'financing', 'uber', 'ge', 'amazon', 'facebook', 'lyft', 'healthcare', 'technology', 'consumer', 'real estate', 'google','yahoo', 'huawei']\neconomy_news_list = ['unemployment', 'gdp', 'economy', 'purchase', 'plunge', 'plummet', 'oil', 'outlook', '2019', 'next year', 'distress', 'crisis', 'market', 'workforce', 'plan']\n\nCOMP_IN = ['python', 'code', 'computer', 'algorithm', ]\nCOMP_OUT = [\"Python is what I'm made of.\", \\\n \"Did you know I'm made of code!?\", \\\n \"Computers are so magical\", \\\n \"Do you think I'll pass the Turing test?\"]\n\nPEOPLE_IN = ['turing', 'hopper', 'neumann', 'lovelace']\nPEOPLE_OUT = ['was awesome!', 'did so many important things!', 'is someone you should look up :).']\nPEOPLE_NAMES = {'turing': 'Alan', 'hopper': 'Grace', 'neumann': 'John von', 'lovelace': 'Ada'}\n\nJOKES_IN = ['funny', 'hilarious', 'ha', 'haha', 'hahaha', 'lol']\nJOKES_OUT = ['ha', 'haha', 'lol'] \n\nNONO_IN = ['matlab', 'java', 'C++']\nNONO_OUT = [\"I'm sorry, I don't want to talk about\"]\n\nUNKNOWN = ['Oops, such information is not available currently!']\n\nQUESTION = \"I'm too shy to answer questions. What do you want to talk about?\"",
"_____no_output_____"
],
[
"def have_a_chat():\n \"\"\"Main function to run our chatbot.\"\"\"\n \n # Define news article lists outside loop to be able to reference when user inputs numeric choice option\n chosen_article_list = []\n article_list_indices = []\n \n chat = True\n while chat:\n\n # Get a message from the user\n msg = input('INPUT :\\t')\n out_msg = None \n \n # Check if the input is a question\n question = is_question(msg)\n \n # Check if the input is a number (article choice)\n isnumber = msg.isnumeric()\n\n # Prepare the input message\n msg = prepare_text(msg)\n\n # Check for an end msg \n if end_chat(msg):\n out_msg = 'Bye!'\n chat = False\n\n # Check for a selection of topics that we have defined to respond to\n # Here, we will check for a series of topics that we have designed to answer to\n if not out_msg:\n\n # Initialize to collect a list of possible outputs\n outs = []\n\n # Check if the input looks like a greeting, add a greeting output if so\n outs.append(selector(msg, GREETINGS_IN, GREETINGS_OUT))\n \n ## Check if the input looks like a market greeting, add a market greeting output if so\n outs.append(selector(msg, MARKETS_GREETINGS_IN, MARKETS_GREETINGS_OUT))\n \n # Check if the input looks like a computer thing, add a computer output if so\n outs.append(selector(msg, COMP_IN, COMP_OUT))\n \n # Check if the input looks like a computer thing, add a computer output if so\n if is_in_list(msg, type_news):\n \n # Define specific news article lists inside loop to be able to reference when input is a specific news type\n political_article_list = []\n deal_article_list = []\n economy_article_list = []\n article_display = ''\n\n for e in news_dict:\n article_title = e['title']\n article_link = e['url']\n article_summary = e['description']\n \n for word in political_news_list:\n \n # Check if a set of words are in the news article titles to determine whether it is political news\n if word in article_title.lower():\n political_article_list.append({'title': article_title, 'link': article_link, 'summary': article_summary })\n \n for word in deal_news_list:\n \n # Check if a set of words are in the news article titles to determine whether it is deal news\n if word in article_title.lower():\n deal_article_list.append({'title': article_title, 'link': article_link, 'summary': article_summary })\n\n for word in economy_news_list:\n \n # Check if a set of words are in the news article titles to determine whether it is economy news\n if word in article_title.lower():\n economy_article_list.append({'title': article_title, 'link': article_link, 'summary': article_summary })\n \n # Chechk if the input contains news type that users want to read more about\n if \"political\" in msg:\n \n # Check if the news article list today for political news is empty\n if len(political_article_list) != 0: \n for index, article in enumerate(political_article_list):\n \n # Assign an Index number to every news in the list and store this to be able to check later if an option was chosen\n article_list_indices.append(str(index + 1)) \n article_display += ( '(' + str(index + 1) + ') ' + article['title'] + '\\n')\n outs.append(WHICH_ARTICLE.replace('%', 'political') + ' \\n' + article_display)\n \n # store article list to be able to reference it later\n chosen_article_list = political_article_list \n else:\n outs.append(NO_NEWS.replace('%', 'political'))\n elif \"deal\" in msg:\n \n # Check if the news article list today for deal news is empty\n if len(deal_article_list) != 0: \n for index, article in enumerate(deal_article_list):\n \n 
# Assign an Index number to every news in the list and store this to be able to check later if an option was chosen\n article_list_indices.append(str(index + 1)) \n article_display += ( '(' + str(index + 1) + ') ' + article['title'] + '\\n')\n outs.append(WHICH_ARTICLE.replace('%', 'deal') + ' \\n' + article_display)\n \n # store article list to be able to reference it later \n chosen_article_list = deal_article_list \n else:\n outs.append(NO_NEWS.replace('%', 'deal')) \n elif \"economy\" in msg:\n \n # Check if the news article list today for economy news is empty\n if len(economy_article_list) != 0: \n for index, article in enumerate(economy_article_list):\n \n # Assign an Index number to every news in the list and store this to be able to check later if an option was chosen\n article_list_indices.append(str(index + 1)) \n article_display += ( '(' + str(index + 1) + ') ' + article['title'] + '\\n')\n outs.append(WHICH_ARTICLE.replace('%', 'economy') + ' \\n' + article_display)\n \n # store article list to be able to reference it later\n chosen_article_list = economy_article_list \n else:\n outs.append(NO_NEWS.replace('%', 'economy'))\n \n # Check if the input mentions a person that is specified, add a person output if so\n if is_in_list(msg, PEOPLE_IN):\n name = find_in_list(msg, PEOPLE_IN)\n outs.append(list_to_string([PEOPLE_NAMES[name], name.capitalize(),\n selector(msg, PEOPLE_IN, PEOPLE_OUT)], ' '))\n\n # Check if the input looks like a joke, add a repeat joke output if so\n outs.append(respond_echo(selector(msg, JOKES_IN, JOKES_OUT), 3, ''))\n\n # Check if the input has some words we don't want to talk about, say that, if so\n if is_in_list(msg, NONO_IN):\n outs.append(list_to_string([selector(msg, NONO_IN, NONO_OUT), find_in_list(msg, NONO_IN)], ' '))\n\n # IF YOU WANTED TO ADD MORE TOPICS TO RESPOND TO, YOU COULD ADD THEM IN HERE\n\n # We could have selected multiple outputs from the topic search above (if multiple return possible outputs)\n # We also might have appended None in some cases, meaning we don't have a reply\n # To deal with this, we are going to randomly select an output from the set of outputs that are not None\n options = list(filter(None, outs))\n if options:\n out_msg = random.choice(options)\n\n # If we don't have an output yet, but the input was a question, return msg related to it being a question\n if not out_msg and question:\n out_msg = QUESTION\n \n if not out_msg and isnumber:\n if is_in_list(msg, article_list_indices):\n option_str = ''.join(msg)\n option_int = int(option_str)\n article_index = option_int - 1\n chosen_article = chosen_article_list[article_index]\n \n outs.append(chosen_article['title'] + '\\n\\n Summary: ' + chosen_article['summary'] + '\\n\\n Read more: ' + chosen_article['link'])\n \n options = list(filter(None, outs))\n if options:\n out_msg = random.choice(options)\n\n # Catch-all to say something if msg not caught & processed so far\n if not out_msg:\n out_msg = random.choice(UNKNOWN)\n\n print('OUTPUT:', out_msg)",
"_____no_output_____"
],
[
"have_a_chat()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e75061438b3536f6179d18b9a5de98f021130565 | 4,894 | ipynb | Jupyter Notebook | Gardenkiak/Programazioa/.ipynb_checkpoints/ZerrendenZerrendak-checkpoint.ipynb | mpenagar/Konputaziorako-Sarrera | 1f276cbda42e9d3d0beb716249fadbad348533d7 | [
"MIT"
] | null | null | null | Gardenkiak/Programazioa/.ipynb_checkpoints/ZerrendenZerrendak-checkpoint.ipynb | mpenagar/Konputaziorako-Sarrera | 1f276cbda42e9d3d0beb716249fadbad348533d7 | [
"MIT"
] | null | null | null | Gardenkiak/Programazioa/.ipynb_checkpoints/ZerrendenZerrendak-checkpoint.ipynb | mpenagar/Konputaziorako-Sarrera | 1f276cbda42e9d3d0beb716249fadbad348533d7 | [
"MIT"
] | null | null | null | 18.398496 | 107 | 0.419902 | [
[
[
"# Matrizeak: zerrenden zerrendak\n\nMatrize baten moduko egitura sor daiteke zerrenden zerrendekin:",
"_____no_output_____"
]
],
[
[
"A = [[1,2,3],[4,5,6],[7,8,9]]\nprint(A)\nprint(A[1][2])\n# Hau okerra litzateke\n#print(A[1,2])",
"[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n6\n"
],
[
"# Lerro berriak gehituz, argiagoa.\nA = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\nprint(A)\nprint(A[1][2])\n# Baina... ez da matrize bat!\nA[1].append(100)\nprint(A)",
"[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n6\n[[1, 2, 3], [4, 5, 6, 100], [7, 8, 9]]\n"
],
[
"def matrizea_idatzi(M):\n ilara = len(M)\n zutabe = len(M[0])\n for i in range(ilara):\n for j in range(zutabe):\n print(M[i][j],end=\" \")\n print()\n\nmatrizea_idatzi(A)",
"1 2 3 \n4 5 6 \n7 8 9 \n"
],
[
"def matrizea_idatzi(M):\n for z in M:\n for x in z:\n print(x,end=\" \")\n print()\n\nmatrizea_idatzi(A)",
"1 2 3 \n4 5 6 100 \n7 8 9 \n"
]
],
[
[
"Kontuz espresio literal oso trinkoekin...",
"_____no_output_____"
]
],
[
[
"print([0]*3)\nprint([[0]*3])\nprint([[0]*3]*3)\nA = [[0]*3]*3\nmatrizea_idatzi(A)",
"[0, 0, 0]\n[[0, 0, 0]]\n[[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n0 0 0 \n0 0 0 \n0 0 0 \n"
],
[
"A[1][1] = 5\nmatrizea_idatzi(A)",
"0 5 0 \n0 5 0 \n0 5 0 \n"
],
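[
"# Added sketch (not in the original notebook): building each row with a comprehension\n# creates independent lists, so changing one entry no longer changes a whole column.\nB = [[0]*3 for _ in range(3)]\nB[1][1] = 5\nmatrizea_idatzi(B)\nprint(B[0] is B[1], B[0] is B[2])",
"_____no_output_____"
],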
[
"print(A[0] is A[1], A[0] is A[2], A[1] is A[2])",
"True True True\n"
]
],
[
[
"<table border=\"0\" width=\"100%\" style=\"margin: 0px;\">\n<tr> \n <td style=\"text-align:left\"><a href=\"Zerrendak.ipynb\">< < Zerrendak < <</a></td>\n <td style=\"text-align:right\"><a href=\"NKoteak.ipynb\">> > NKoteak > ></a></td>\n</tr>\n</table>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7506f28e4def720950330c3d4a134d33ec2d3c0 | 226,437 | ipynb | Jupyter Notebook | KKR VS MI/Match Analysis KKR VS MI.ipynb | tacklesta/WPL | b062aa043c62b429c1eb071f43e68b76114abbf1 | [
"MIT"
] | null | null | null | KKR VS MI/Match Analysis KKR VS MI.ipynb | tacklesta/WPL | b062aa043c62b429c1eb071f43e68b76114abbf1 | [
"MIT"
] | null | null | null | KKR VS MI/Match Analysis KKR VS MI.ipynb | tacklesta/WPL | b062aa043c62b429c1eb071f43e68b76114abbf1 | [
"MIT"
] | null | null | null | 112.711299 | 35,880 | 0.805147 | [
[
[
"## Match Analysis",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd \nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Data Cleaning and Exploring",
"_____no_output_____"
]
],
[
[
"matches = pd.read_csv(\"matches.csv\" , index_col = \"id\")\nmatches = matches.iloc[:,:-3]\nmatches.head()",
"_____no_output_____"
],
[
"matches.shape",
"_____no_output_____"
],
[
"matches.winner.unique()",
"_____no_output_____"
]
],
[
[
"### Taking in consideration only KKR VS MI matches",
"_____no_output_____"
]
],
[
[
"KM =matches[np.logical_or(np.logical_and(matches['team1']=='Kolkata Knight Riders',matches['team2']=='Mumbai Indians'),\n np.logical_and(matches['team2']=='Kolkata Knight Riders',matches['team1']=='Mumbai Indians'))]",
"_____no_output_____"
],
[
"KM.head()",
"_____no_output_____"
],
[
"KM.shape",
"_____no_output_____"
],
[
"KM.season.unique()",
"_____no_output_____"
],
[
"KM.isnull().sum()",
"_____no_output_____"
],
[
"KM.describe().iloc[:,2:].T",
"_____no_output_____"
]
],
[
[
"## HEAD TO HEAD",
"_____no_output_____"
]
],
[
[
"KM.groupby(\"winner\")[\"winner\"].count()",
"_____no_output_____"
],
[
"sns.countplot(KM[\"winner\"])\nplt.text(-0.09,17,str(KM['winner'].value_counts()['Mumbai Indians']),size=20,color='white')\nplt.text(0.95,4,str(KM['winner'].value_counts()['Kolkata Knight Riders']),size=20,color='white')\nplt.xlabel('Winner',fontsize=15)\nplt.ylabel('No. of Matches',fontsize=15)\nplt.title('KKR VS MI - head to head',fontsize = 20)",
"_____no_output_____"
],
[
"Season_wise_Match_Winner = pd.DataFrame(KM.groupby([\"season\",\"winner\"])[\"winner\"].count())\nprint(\"Season wise winner of matches between KKR and MI :\")\nSeason_wise_Match_Winner",
"Season wise winner of matches between KKR and MI :\n"
]
],
[
[
"### Winning Percentage",
"_____no_output_____"
]
],
[
[
"Winning_Percentage = KM['winner'].value_counts()/len(KM['winner'])",
"_____no_output_____"
],
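[
"# Equivalent one-liner (an added note, not in the original analysis): value_counts\n# can return the proportions directly.\nprint(KM['winner'].value_counts(normalize=True))",
"_____no_output_____"
],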
[
"print(\" MI winning percentage against KKR(overall) : {}%\".format(int(round(Winning_Percentage[0]*100))))\nprint(\"KKR winning percentage against MI(overall) : {}%\".format(int(round(Winning_Percentage[1]*100))))",
" MI winning percentage against KKR(overall) : 76%\nKKR winning percentage against MI(overall) : 24%\n"
]
],
[
[
"### Performance Based Analysis",
"_____no_output_____"
]
],
[
[
"def performance( team_name , given_df ):\n for value in given_df.groupby('winner'):\n if value[0] == team_name:\n\n total_win_by_runs = sum(list(value[1]['win_by_runs']))\n total_win_by_wickets = sum(list(value[1]['win_by_wickets']))\n \n if 0 in list(value[1]['win_by_runs'].value_counts().index):\n x = value[1]['win_by_runs'].value_counts()[0]\n else:\n x = 0\n \n if 0 in list(value[1]['win_by_wickets'].value_counts().index):\n y = value[1]['win_by_wickets'].value_counts()[0]\n else:\n y = 0\n \n number_of_times_given_team_win_while_defending = (len(value[1]) - x )\n number_of_times_given_team_win_while_chasing = (len(value[1]) - y )\n \n average_runs_by_which_a_given_team_wins_while_defending = total_win_by_runs / number_of_times_given_team_win_while_defending\n average_wickets_by_which_a_given_team_wins_while_chasing = total_win_by_wickets / number_of_times_given_team_win_while_chasing\n \n print('Number of times given team win while defending :' , number_of_times_given_team_win_while_defending ) \n print('Number of times given team win while chasing :' , number_of_times_given_team_win_while_chasing )\n print()\n print('Average runs by which a given team wins while defending : ' ,round(average_runs_by_which_a_given_team_wins_while_defending))\n print('Average wickets by which a given team wins while chasing : ' ,round(average_wickets_by_which_a_given_team_wins_while_chasing))",
"_____no_output_____"
],
[
"performance(\"Kolkata Knight Riders\",KM)",
"Number of times given team win while defending : 3\nNumber of times given team win while chasing : 3\n\nAverage runs by which a given team wins while defending : 36.0\nAverage wickets by which a given team wins while chasing : 7.0\n"
],
[
"performance(\"Mumbai Indians\",KM)",
"Number of times given team win while defending : 8\nNumber of times given team win while chasing : 11\n\nAverage runs by which a given team wins while defending : 40.0\nAverage wickets by which a given team wins while chasing : 6.0\n"
]
],
[
[
"## Toss Analysis",
"_____no_output_____"
]
],
[
[
"Toss_Decision_based_Winner = pd.DataFrame(KM.groupby(['toss_winner',\"toss_decision\",\"winner\"])[\"winner\"].count())\n\nprint(\" No of times toss winning decision leading to match winning : \")\nToss_Decision_based_Winner",
" No of times toss winning decision leading to match winning : \n"
],
[
"Toss_Decision = pd.DataFrame(KM.groupby(['toss_winner',\"toss_decision\"])[\"toss_decision\"].count())\n\nprint (\"Toss winner decision :\")\nToss_Decision",
"Toss winner decision :\n"
],
[
"sns.set(style='whitegrid')\nplt.figure(figsize = (18,8))\nsns.countplot(KM['toss_winner'],palette='Set2',hue=KM['toss_decision'])\nplt.title('Toss decision statistics for both team',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('Toss winner',fontsize=15)\nplt.ylabel('Count',fontsize=15)\nplt.legend(loc='best',fontsize=15)\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the above analysis we can see that mostly both the teams prefer chasing the score after winning the toss",
"_____no_output_____"
]
],
[
[
"sns.set(style='whitegrid')\nplt.figure(figsize = (18,9))\nsns.countplot(KM['toss_winner'],hue=KM['winner'])\nplt.title('Match Winner vs Toss Winner statistics for both team',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('Toss winner',fontsize=15)\nplt.ylabel('Match Winner',fontsize=15)\nplt.legend(loc=\"best\",fontsize=15)",
"_____no_output_____"
]
],
[
[
"Toss Decision based Analysis of both the teams seperately :",
"_____no_output_____"
]
],
[
[
"KKR = KM[KM[\"toss_winner\"]==\"Kolkata Knight Riders\"]\nMI = KM[KM[\"toss_winner\"]==\"Mumbai Indians\"]",
"_____no_output_____"
],
[
"sns.set(style='whitegrid')\nplt.figure(figsize = (18,9))\nsns.countplot(KKR['toss_decision'],hue=KKR['winner'])\nplt.title('Match Winner vs Toss Winner statistics for KKR',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('Toss decision of KKR',fontsize=15)\nplt.ylabel('Match Winner',fontsize=15)\nplt.legend(loc=1,fontsize=15)",
"_____no_output_____"
],
[
"sns.set(style='whitegrid')\nplt.figure(figsize = (18,9))\nsns.countplot(MI['toss_decision'],hue=MI['winner'])\nplt.title('Match Winner vs Toss Winner statistics for MI',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('Toss decision of MI',fontsize=15)\nplt.ylabel('Match Winner',fontsize=15)\nplt.legend(loc=1,fontsize=15)",
"_____no_output_____"
],
[
"player_of_the_match = pd.DataFrame(KM.player_of_match.value_counts())\n\nprint(\"Man of the match :\")\n\nplayer_of_the_match",
"Man of the match :\n"
]
],
[
[
"### Recent Year Performance Analysis",
"_____no_output_____"
]
],
[
[
"cond1 = KM[\"season\"] == 2015\ncond2 = KM[\"season\"] == 2016\ncond3 = KM[\"season\"] == 2017\ncond4 = KM[\"season\"] == 2018\ncond5 = KM[\"season\"] == 2019\nfinal = KM[cond1 | cond2 | cond3 | cond4 | cond5]\nfinal",
"_____no_output_____"
],
[
"final.shape",
"_____no_output_____"
],
[
"player = pd.DataFrame(final.player_of_match.value_counts())\n\nprint(\"Man of the match :\")\nplayer",
"Man of the match :\n"
],
[
"plt.figure(figsize = (10,6))\nsns.countplot(final['winner'])\nplt.title('Match won in recent years',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('Team',fontsize=15)\nplt.ylabel('Win Count',fontsize=15)\n#plt.legend(loc=1,fontsize=15)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"From all the above analysis , we found a complete domination of Mumbai Indians over Kolkata Knight Riders.\n\nSo , based on the analysis related to head to head performance , toss based result etc we can predict Mumbai Indians to be favorite for today's match.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7508496bf8dd9a52594a1aeb9f683a4e9e42c5a | 125,323 | ipynb | Jupyter Notebook | plotting/ex_interpolation.ipynb | nathanielng/python-snippets | d310f074acc1ea7fdb41b2db3ab69406b96a18ca | [
"MIT"
] | 2 | 2020-02-19T00:35:23.000Z | 2020-02-22T10:25:02.000Z | plotting/ex_interpolation.ipynb | nathanielng/python-snippets | d310f074acc1ea7fdb41b2db3ab69406b96a18ca | [
"MIT"
] | 2 | 2020-06-12T09:47:35.000Z | 2020-10-07T00:22:00.000Z | plotting/ex_interpolation.ipynb | nathanielng/python-snippets | d310f074acc1ea7fdb41b2db3ab69406b96a18ca | [
"MIT"
] | null | null | null | 522.179167 | 68,408 | 0.944831 | [
[
[
"# Interpolation & Fitting\n\n## 1. Libraries",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\n\nfrom matplotlib.cm import colors\nfrom scipy.interpolate import interp1d, lagrange\nfrom scipy.optimize import curve_fit\nfrom statsmodels.nonparametric.kernel_regression import KernelReg",
"_____no_output_____"
]
],
[
[
"## 2. Calculations\n\n### 2.1 Original data",
"_____no_output_____"
]
],
[
[
"# Original data points\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\ny = np.array([0.1, 1.2, 3.0, 4.2, 3.8])\n\n# Extra data points for drawing the curves\nx1 = np.linspace(-0.9, 6.7, 50)\nx2 = np.linspace(x.min(), x.max(), 50)",
"_____no_output_____"
]
],
[
[
"### 2.2 Calculate interpolating functions",
"_____no_output_____"
]
],
[
[
"lg = lagrange(x, y)\nlinear = interp1d(x, y, kind='linear')\nspline0 = interp1d(x, y, kind='zero')\nspline1 = interp1d(x, y, kind='slinear')\nspline2 = interp1d(x, y, kind='quadratic')\nspline3 = interp1d(x, y, kind='cubic')",
"_____no_output_____"
]
],
[
[
"## 3. Plots - Lagrange vs Splines",
"_____no_output_____"
]
],
[
[
"fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(7,13))\nax0.plot(x, y, 'bo')\nax0.plot(x1, lg(x1), label='Lagrange')\nax0.plot(x2, linear(x2), label='linear')\nax0.legend(loc='best', frameon=False)\n\nax1.plot(x, y, 'bo')\nax1.plot(x2, lg(x2), label='Lagrange', color='black')\nax1.plot(x2, spline0(x2), label='spline (0th order)', color='red')\nax1.plot(x2, spline1(x2), label='spline (1st order)', color='orange')\nax1.plot(x2, spline2(x2), label='spline (2nd order)', color='green')\nax1.plot(x2, spline3(x2), label='spline (3rd order)', color='blue')\nax1.legend(loc='best', frameon=False);",
"_____no_output_____"
]
],
[
[
"## 4. Comparison - Lagrange, LOWESS, Kernel, Cubic Splines\n\n### 4.1 Function to fit the data",
"_____no_output_____"
]
],
[
[
"def get_interpolated_data(x_train, y_train, x_new, kind, frac=0.1):\n if kind == 'lagrange':\n fn = lagrange(x_train, y_train)\n x_pred = x_new\n y_pred = fn(x_new)\n elif kind == 'lowess':\n xy = sm.nonparametric.lowess(y_train, x_train, frac=frac)\n x_pred = xy[:, 0]\n y_pred = xy[:, 1]\n elif kind == 'kernel':\n kr = KernelReg(y_train, x_train, 'c')\n x_pred = x_new\n y_pred, _ = kr.fit(x_new)\n else:\n fn = interp1d(x, y, kind=kind)\n x_pred = x_new\n y_pred = fn(x_new)\n \n return x_pred, y_pred",
"_____no_output_____"
]
],
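[
[
"# (Added usage sketch on toy points, not part of the original notebook; the real data is set up in section 4.2 below.)\nx_toy = np.array([0.0, 1.0, 2.0, 3.0])\ny_toy = np.array([0.0, 1.0, 0.0, 1.0])\nxp, yp = get_interpolated_data(x_toy, y_toy, np.linspace(0.0, 3.0, 7), kind='lagrange')\nprint(np.round(yp, 3))",
"_____no_output_____"
]
],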
[
[
"### 4.2 Data",
"_____no_output_____"
]
],
[
[
"n_pts = 10\nn_all = 50\n\nx = np.linspace(0, 2*np.pi, n_pts)\ny = np.sin(x) + 0.1*(np.random.uniform(0, 1, n_pts) - 0.5)\n\nx_actual = np.linspace(0, 2*np.pi, n_all)\ny_actual = np.sin(x_actual)\n\nx2 = np.linspace(x.min(), x.max(), 50)",
"_____no_output_____"
]
],
[
[
"### 4.3 Plots",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(2, 2, figsize=(12,8))\nkinds = ['lagrange', 'lowess', 'kernel', 'cubic']\ncmap = plt.get_cmap(\"tab10\")\n\ni = 0\nfor row in range(2):\n for col in range(2):\n kind = kinds[i]\n x_p, y_p = get_interpolated_data(x, y, x2, kind)\n axs[row][col].plot(x, y, 'bo')\n axs[row][col].plot(x_actual, y_actual, '--', color='gray', alpha=0.5, label='original')\n axs[row][col].plot(x_p, y_p, label=kind, color=cmap(i))\n axs[row][col].legend(loc='best', frameon=False)\n i += 1",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e750885d4f8f95c401945accc7103a0d27848ac7 | 3,022 | ipynb | Jupyter Notebook | scala/higher_order_functions.ipynb | sudheer/notebooks | fa0583d8348c2d03038564bce25f37d60edc4206 | [
"MIT"
] | null | null | null | scala/higher_order_functions.ipynb | sudheer/notebooks | fa0583d8348c2d03038564bce25f37d60edc4206 | [
"MIT"
] | null | null | null | scala/higher_order_functions.ipynb | sudheer/notebooks | fa0583d8348c2d03038564bce25f37d60edc4206 | [
"MIT"
] | null | null | null | 43.797101 | 599 | 0.588352 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e750afffe2a6e8bcc19750bce22e9a1fb465dbc3 | 189,195 | ipynb | Jupyter Notebook | week-2/index.ipynb | PluVian/deep-learning-study-2017-winter | 0a7c932c6b60009161a98c59010d666a4b687247 | [
"MIT"
] | null | null | null | week-2/index.ipynb | PluVian/deep-learning-study-2017-winter | 0a7c932c6b60009161a98c59010d666a4b687247 | [
"MIT"
] | null | null | null | week-2/index.ipynb | PluVian/deep-learning-study-2017-winter | 0a7c932c6b60009161a98c59010d666a4b687247 | [
"MIT"
] | 5 | 2018-01-21T08:35:27.000Z | 2018-02-19T05:31:52.000Z | 82.294476 | 27,388 | 0.773213 | [
[
[
"# 모두를 위한 딥러닝 week 2\n\n# Tensorflow for logistic classifiers\n\nJimin Sun",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"/usr/local/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\n"
]
],
[
[
"### Example from lab video",
"_____no_output_____"
]
],
[
[
"x_data = [[1,2], [2,3], [3,1], [4,4], [5,3], [6,2]]\ny_data = [[0],[0],[0],[1],[1],[1]]",
"_____no_output_____"
],
[
"X = tf.placeholder(tf.float32, shape = [None, 2])\nY = tf.placeholder(tf.float32, shape = [None, 1]) # Shape에 주의!",
"_____no_output_____"
],
[
"W = tf.Variable(tf.random_normal([2,1]), name = 'weight')\n# 들어오는 값 2개, 나가는 값 1개.\nb = tf.Variable(tf.random_normal([1]), name = 'bias')",
"_____no_output_____"
],
[
"hypothesis = tf.sigmoid(tf.matmul(X, W)+b)\ncost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))\ntrain = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)",
"_____no_output_____"
],
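[
"# (Added illustration, not part of the original lab code; values are toy numbers.)\n# The cost above is the binary cross-entropy; computing it with plain numpy shows\n# exactly what tf.reduce_mean averages over.\nimport numpy as np\ny_true = np.array([0., 0., 1., 1.])\ny_hat = np.array([0.1, 0.3, 0.8, 0.9])  # hypothetical sigmoid outputs\nprint(-np.mean(y_true*np.log(y_hat) + (1-y_true)*np.log(1-y_hat)))  # ~0.198",
"_____no_output_____"
],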
[
"predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)",
"_____no_output_____"
],
[
"accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))",
"_____no_output_____"
]
],
[
[
"What is 'reduce_mean'?\n* tf.reduce_mean 은 평균을 구해주는 operation을 한다. (np.mean과 같은 기능!)\n* 둘의 차이점은, numpy operation은 파이썬 어디서든 사용할 수 있지만, tensorflow operation은 tensorflow **Session** 내에서만 동작한다는 데에 있다.\n* But why **reduce**_mean?\n\n> The key here is the word reduce, a concept from functional programming, which makes it possible for reduce_mean in TensorFlow to keep a running average of the results of computations from a batch of inputs.\n\n> 출처 : https://stackoverflow.com/questions/34236252/difference-between-np-mean-and-tf-reduce-mean-in-numpy-and-tensorflow\n\n> 참고 #1. https://www.python-course.eu/lambda.php\n\n> 참고 #2. https://www.python-course.eu/python3_lambda.php\n\n<img src=\"reduce.png\" height=\"70%\" width=\"70%\">",
"_____no_output_____"
],
[
"<img src='reduce_2.png' height=\"70%\" width=\"70%\">",
"_____no_output_____"
]
],
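[
[
"# (Added illustration; a plain-Python sketch, not part of the lecture code.)\n# 'reduce' folds a sequence with a binary function. tf.reduce_mean applies the same\n# idea along tensor axes inside a Session, while np.mean works anywhere in Python.\nfrom functools import reduce\ndata = [1.0, 2.0, 3.0, 4.0]\ntotal = reduce(lambda acc, x: acc + x, data, 0.0)\nprint(total / len(data))  # 2.5",
"_____no_output_____"
]
],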
[
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(10001):\n cost_val, _ = sess.run([cost, train], feed_dict = {X: x_data, Y: y_data})\n if step % 1000 == 0:\n print(step, cost_val)\n \n # Accuracy\n h, c, a = sess.run([hypothesis, predicted, accuracy],\n feed_dict = {X: x_data, Y: y_data})\n print(\"\\nHypothesis: \\n\", h, \"\\nCorrect (Y): \\n\", c, \"\\nAccuracy: \\n\", a)",
"0 0.585211\n1000 0.387248\n2000 0.313725\n3000 0.261586\n4000 0.223239\n5000 0.194146\n6000 0.17146\n7000 0.153347\n8000 0.13859\n9000 0.126357\n10000 0.116064\n\nHypothesis: \n [[ 0.0268787 ]\n [ 0.16871433]\n [ 0.21446182]\n [ 0.86153394]\n [ 0.93646842]\n [ 0.97216052]] \nCorrect (Y): \n [[ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 1.]\n [ 1.]] \nAccuracy: \n 1.0\n"
]
],
[
[
"## Let's apply this classifier to another example!\n\nThe same dataset from JaeYoung's example last week :)\n\nWhere to get it : https://www.kaggle.com/c/uci-wine-quality-dataset/data",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"data = pd.read_csv('winequality-data.csv', dtype = 'float32', header=0)",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
]
],
[
[
"## Binary Classifier",
"_____no_output_____"
],
[
"Here, you can spot the **'quality'** column, where the quality of wine is classified to 7 categories.",
"_____no_output_____"
]
],
[
[
"data['quality'].unique()",
"_____no_output_____"
]
],
[
[
"We'll start from a binary classifier, so we label \n\n* wines of quality 3.0, 4.0, 5.0 as class 0,\n* and wines of quality 6.0, 7.0, 8.0, 9.0 as class 1.",
"_____no_output_____"
]
],
[
[
"data.info() \n# You can easily check if there are any missing values in your data.",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3918 entries, 0 to 3917\nData columns (total 13 columns):\nfixed.acidity 3918 non-null float32\nvolatile.acidity 3918 non-null float32\ncitric.acid 3918 non-null float32\nresidual.sugar 3918 non-null float32\nchlorides 3918 non-null float32\nfree.sulfur.dioxide 3918 non-null float32\ntotal.sulfur.dioxide 3918 non-null float32\ndensity 3918 non-null float32\npH 3918 non-null float32\nsulphates 3918 non-null float32\nalcohol 3918 non-null float32\nquality 3918 non-null float32\nid 3918 non-null float32\ndtypes: float32(13)\nmemory usage: 199.0 KB\n"
],
[
"grade = data['quality'] > 5.0\ndata['grade'] = grade.astype(np.float32)",
"_____no_output_____"
],
[
"data.head()\n# new column 'grade' is added at the end",
"_____no_output_____"
],
[
"y_data = data.values[:,[-1]]\nx_data = data.values[:,:-3] # columns quality, id, grade are excluded",
"_____no_output_____"
],
[
"y_data.shape, x_data.shape",
"_____no_output_____"
],
[
"X = tf.placeholder(tf.float32, shape = [None, 11])\nY = tf.placeholder(tf.float32, shape = [None, 1])",
"_____no_output_____"
],
[
"W = tf.Variable(tf.random_normal([11,1]), name = \"weight\")\nb = tf.Variable(tf.random_normal([1]), name = \"bias\")",
"_____no_output_____"
],
[
"hypothesis = tf.sigmoid(tf.matmul(X,W) + b)\ncost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis))\ntrain = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)",
"_____no_output_____"
],
[
"predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))",
"_____no_output_____"
],
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n feed = {X: x_data, Y: y_data}\n for step in range(1001):\n cost_val, _ = sess.run([cost, train], feed_dict = feed)\n if step % 100 == 0:\n print(step, cost_val)\n \n h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict = feed)\n print('\\nHypothesis: \\n', h, '\\nCorrect (Y)\\n: ',c, '\\nAccuracy\\n:', a)",
"0 nan\n100 nan\n200 nan\n300 nan\n400 nan\n500 nan\n600 nan\n700 nan\n800 nan\n900 nan\n1000 nan\n\nHypothesis: \n [[ nan]\n [ nan]\n [ nan]\n ..., \n [ nan]\n [ nan]\n [ nan]] \nCorrect (Y)\n: [[ 0.]\n [ 0.]\n [ 0.]\n ..., \n [ 0.]\n [ 0.]\n [ 0.]] \nAccuracy\n: 0.335375\n"
]
],
[
[
"???????\n\n## Data normalization was needed!",
"_____no_output_____"
],
[
"There are many ways to normalize (= change scales to $[0,1]$) data, but this time we'll use **min_max_normalization**. \n\nThe idea here is to apply this formula below.\n\n\n$$min\\_max(x_{ij}) = \\dfrac{x_{ij} - min_{1 \\leq i \\leq n}(x_{ij})}{(max_{1 \\leq i \\leq n}(x_{ij})-min_{1 \\leq i \\leq n}(x_{ij}))}$$\n\n\n\n* $min_{1 \\leq i \\leq n}(x_{ij})$ : $j$ 번째 열에서 가장 **작은** 데이터값\n* $max_{1 \\leq i \\leq n}(x_{ij})$ : $j$ 번째 열에서 가장 **큰** 데이터값\n\n데이터에서 가장 작은 값은 0, 데이터에서 가장 큰 값은 1로 변환시킨다.",
"_____no_output_____"
]
],
[
[
"def min_max_normalized(data):\n col_max = np.max(data, axis=0) # axis=0 : 열, axis=1 : 행\n col_min = np.min(data, axis=0)\n return np.divide(data - col_min, col_max - col_min)",
"_____no_output_____"
],
[
"x_data = min_max_normalized(x_data)",
"_____no_output_____"
],
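[
"# (Added sanity check, not in the original notebook.)\n# After min-max normalization every column should span exactly [0, 1].\nprint(np.min(x_data, axis=0))\nprint(np.max(x_data, axis=0))",
"_____no_output_____"
],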
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n feed = {X: x_data, Y: y_data}\n for step in range(20001):\n cost_val, _ = sess.run([cost, train], feed_dict = feed)\n if step % 2000 == 0:\n print(step, cost_val)\n \n h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict = feed)\n print('\\nHypothesis: \\n', h, '\\nCorrect (Y)\\n: ',c, '\\nAccuracy\\n:', a)",
"0 1.53646\n2000 0.611673\n4000 0.596641\n6000 0.585185\n8000 0.576219\n10000 0.569024\n12000 0.563115\n14000 0.558161\n16000 0.553931\n18000 0.550264\n20000 0.547041\n\nHypothesis: \n [[ 0.83623403]\n [ 0.87260079]\n [ 0.53597355]\n ..., \n [ 0.79522443]\n [ 0.75276494]\n [ 0.38663006]] \nCorrect (Y)\n: [[ 1.]\n [ 1.]\n [ 1.]\n ..., \n [ 1.]\n [ 1.]\n [ 0.]] \nAccuracy\n: 0.701634\n"
]
],
[
[
"Even after 20,000 steps, the accuracy doesn't look high enough. $(\\approx 70\\%)$\n\n### Why?\n\nLet's plot the data, and see if we can find a reason there.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"data.describe()",
"_____no_output_____"
]
],
[
[
"*The variance of most columns seem extremely low.*",
"_____no_output_____"
]
],
[
[
"sns.FacetGrid(data, hue = 'grade', size=6).map(plt.scatter, 'free.sulfur.dioxide', 'total.sulfur.dioxide').add_legend()\nplt.show()",
"_____no_output_____"
],
[
"sns.FacetGrid(data, hue = 'grade', size=6).map(plt.scatter, 'fixed.acidity', 'residual.sugar').add_legend()\nplt.show()",
"_____no_output_____"
],
[
"sns.FacetGrid(data, hue = 'grade', size=6).map(plt.scatter, 'density', 'citric.acid').add_legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"The data itself doesn't really seem linearly separable :(\n\nThis kind of problem will lead us to our next session in the lecture, such as Neural Networks!",
"_____no_output_____"
],
[
"## Multiclass classifier",
"_____no_output_____"
],
[
"### Example from lab video\nA quick review of the example dealt in the lecture.",
"_____no_output_____"
]
],
[
[
"xy = np.loadtxt('data-04-zoo.csv', delimiter=',', dtype=np.float32)",
"_____no_output_____"
],
[
"xy",
"_____no_output_____"
],
[
"x_data = xy[:, 0:-1]\ny_data = xy[:, [-1]]",
"_____no_output_____"
],
[
"# nb_classes = 7\n\n# For cases when you don't want to set a specific number to 'nb_classes', \n# or if you don't exactly know the number of categories ,\n# this might be a more generalized method.\n\nnp.unique(y_data)",
"_____no_output_____"
],
[
"nb_classes = len(np.unique(y_data))",
"_____no_output_____"
],
[
"X = tf.placeholder(tf.float32, [None, 16])\nY = tf.placeholder(tf.int32, [None, 1])\nY_one_hot = tf.one_hot(Y, nb_classes)\nY_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])",
"_____no_output_____"
],
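[
"# (Added illustration, not part of the lecture code; toy labels only.)\n# One-hot encoding turns an integer label into a row with a single 1; tf.one_hot does\n# this for the (N, 1) labels above, and the reshape drops the extra axis it introduces.\nprint(np.eye(nb_classes)[np.array([0, 2, 1])])",
"_____no_output_____"
],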
[
"W = tf.Variable(tf.random_normal([16, nb_classes]), name='weight')\nb = tf.Variable(tf.random_normal([nb_classes]), name='bias')",
"_____no_output_____"
],
[
"logits = tf.matmul(X, W)+b\nhypothesis = tf.nn.softmax(logits)",
"_____no_output_____"
],
[
"cost_i = tf.nn.softmax_cross_entropy_with_logits(logits = logits, \n labels = Y_one_hot)",
"_____no_output_____"
],
[
"cost = tf.reduce_mean(cost_i)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)",
"_____no_output_____"
],
[
"prediction = tf.argmax(hypothesis, 1)\n# tf.argmax : Returns the index with the largest value across axes of a tensor.\ncorrect_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))",
"_____no_output_____"
],
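[
"# (Added illustration, not part of the lecture code; toy probabilities only.)\n# np.argmax behaves like tf.argmax: it returns the index of the largest value along\n# the given axis, which is how class probabilities are turned into class labels.\nprobs = np.array([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])\nprint(np.argmax(probs, 1))  # [1 0]",
"_____no_output_____"
],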
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n for step in range(2001):\n sess.run(optimizer, feed_dict = {X: x_data, Y: y_data})\n \n if step % 100 == 0:\n loss, acc = sess.run([cost, accuracy],\n feed_dict = {X: x_data, Y: y_data})\n print(\"Step: {:5}\\tLoss: {:.3f}\\tAcc: {:.2%}\".format(\n step, loss, acc))\n \n pred = sess.run(prediction, feed_dict = {X: x_data})\n \n # y_data: (N,1) = flatten => (N, ) matches pred.shape\n for p, y in zip(pred, y_data.flatten()):\n print(\"[{}] Prediction: {} True Y: {}\".format(p == int(y), p, int(y)))",
"Step: 0\tLoss: 5.291\tAcc: 5.94%\nStep: 100\tLoss: 0.779\tAcc: 80.20%\nStep: 200\tLoss: 0.459\tAcc: 82.18%\nStep: 300\tLoss: 0.333\tAcc: 90.10%\nStep: 400\tLoss: 0.264\tAcc: 93.07%\nStep: 500\tLoss: 0.218\tAcc: 97.03%\nStep: 600\tLoss: 0.186\tAcc: 98.02%\nStep: 700\tLoss: 0.162\tAcc: 98.02%\nStep: 800\tLoss: 0.143\tAcc: 98.02%\nStep: 900\tLoss: 0.128\tAcc: 98.02%\nStep: 1000\tLoss: 0.115\tAcc: 98.02%\nStep: 1100\tLoss: 0.105\tAcc: 98.02%\nStep: 1200\tLoss: 0.097\tAcc: 100.00%\nStep: 1300\tLoss: 0.089\tAcc: 100.00%\nStep: 1400\tLoss: 0.083\tAcc: 100.00%\nStep: 1500\tLoss: 0.078\tAcc: 100.00%\nStep: 1600\tLoss: 0.073\tAcc: 100.00%\nStep: 1700\tLoss: 0.069\tAcc: 100.00%\nStep: 1800\tLoss: 0.065\tAcc: 100.00%\nStep: 1900\tLoss: 0.061\tAcc: 100.00%\nStep: 2000\tLoss: 0.058\tAcc: 100.00%\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 4 True Y: 4\n[True] Prediction: 4 True Y: 4\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 4 True Y: 4\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 2 True Y: 2\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 2 True Y: 2\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 2 True Y: 2\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 6 True Y: 6\n[True] 
Prediction: 3 True Y: 3\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 4 True Y: 4\n[True] Prediction: 2 True Y: 2\n[True] Prediction: 2 True Y: 2\n[True] Prediction: 3 True Y: 3\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 1 True Y: 1\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 5 True Y: 5\n[True] Prediction: 0 True Y: 0\n[True] Prediction: 6 True Y: 6\n[True] Prediction: 1 True Y: 1\n"
],
[
"true_y = y_data.flatten()",
"_____no_output_____"
],
[
"unique, counts = np.unique(pred, return_counts=True)\ndict(zip(unique, counts))",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score, confusion_matrix\nimport itertools",
"_____no_output_____"
],
[
"def plot_confusion_matrix(y_true,y_pred):\n cm_array = confusion_matrix(y_true,y_pred)\n true_labels = np.arange(0,7)\n pred_labels = np.arange(0,7)\n plt.imshow(cm_array[:-1,:-1], interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\", fontsize=16)\n cbar = plt.colorbar(fraction=0.046, pad=0.04)\n cbar.set_label('Number of images', rotation=270, labelpad=30, fontsize=12)\n xtick_marks = np.arange(len(true_labels))\n ytick_marks = np.arange(len(pred_labels))\n plt.xticks(xtick_marks, true_labels, rotation=90)\n plt.yticks(ytick_marks,pred_labels)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=14)\n plt.xlabel('Predicted label', fontsize=14)\n plt.tight_layout()\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 5\n fig_size[1] = 5\n plt.rcParams[\"figure.figsize\"] = fig_size",
"_____no_output_____"
],
[
"plot_confusion_matrix(true_y,pred)",
"_____no_output_____"
]
],
[
[
"### Back to our wine quality dataset!",
"_____no_output_____"
]
],
[
[
"x_data = data.values[:,:-3]\nx_data = min_max_normalized(x_data)\nx_data",
"_____no_output_____"
],
[
"y_data = (data.values[:,[-3]]).astype(np.int32) # quality column으로 다시 설정",
"_____no_output_____"
],
[
"x_data.shape, y_data.shape",
"_____no_output_____"
],
[
"num_class = len(data['quality'].unique())\nnum_class",
"_____no_output_____"
],
[
"X = tf.placeholder(tf.float32, [None, 11])\nY = tf.placeholder(tf.int32, [None, 1])",
"_____no_output_____"
],
[
"Y_one_hot = tf.one_hot(Y, num_class) # one hot\nY_one_hot = tf.reshape(Y_one_hot, [-1, num_class])",
"_____no_output_____"
],
[
"W = tf.Variable(tf.random_normal([11, num_class]), name = 'weight')\nb = tf.Variable(tf.random_normal([num_class]), name = 'bias')",
"_____no_output_____"
],
[
"logits = tf.matmul(X, W)+b\nhypothesis = tf.nn.softmax(logits)",
"_____no_output_____"
],
[
"cost_i = tf.nn.softmax_cross_entropy_with_logits(logits = logits, \n labels = Y_one_hot)",
"_____no_output_____"
],
[
"cost = tf.reduce_mean(cost_i)",
"_____no_output_____"
],
[
"optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)",
"_____no_output_____"
],
[
"prediction = tf.argmax(hypothesis, 1)\ncorrect_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))",
"_____no_output_____"
],
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n for step in range(10000):\n sess.run(optimizer, feed_dict = {X: x_data, Y: y_data})\n if step % 1000 == 0:\n loss, acc = sess.run([cost, accuracy], \n feed_dict = {X: x_data, Y: y_data})\n print(\"Step: {:5}\\tLoss: {:.3f}\\tAcc: {:.2%}\".format(\n step, loss, acc))\n \n pred = sess.run(prediction, feed_dict = {X: x_data})\n\n\n# for p, y in zip(pred, y_data.flatten()):\n# print(\"[{}] Prediction: {} True Y: {}\".format(p == int(y), p, int(y))) ",
"Step: 0\tLoss: 1.419\tAcc: 20.34%\nStep: 1000\tLoss: 0.801\tAcc: 43.06%\nStep: 2000\tLoss: 0.771\tAcc: 43.85%\nStep: 3000\tLoss: 0.760\tAcc: 44.10%\nStep: 4000\tLoss: 0.754\tAcc: 44.87%\nStep: 5000\tLoss: 0.749\tAcc: 45.02%\nStep: 6000\tLoss: 0.745\tAcc: 45.18%\nStep: 7000\tLoss: 0.742\tAcc: 45.56%\nStep: 8000\tLoss: 0.739\tAcc: 45.92%\nStep: 9000\tLoss: 0.736\tAcc: 46.63%\n"
],
[
"pred_y = pred.astype(np.int32)\npred_y",
"_____no_output_____"
],
[
"actual_y = y_data.flatten().astype(np.int32)\nactual_y",
"_____no_output_____"
],
[
"np.unique(pred_y)",
"_____no_output_____"
],
[
"np.unique(actual_y)",
"_____no_output_____"
],
[
"def plot_confusion_matrix(y_true,y_pred):\n cm_array = confusion_matrix(y_true,y_pred)\n true_labels = np.arange(3,10)\n pred_labels = np.arange(3,10)\n plt.imshow(cm_array[:-1,:-1], interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\", fontsize=16)\n cbar = plt.colorbar(fraction=0.046, pad=0.04)\n cbar.set_label('Number of images', rotation=270, labelpad=30, fontsize=12)\n xtick_marks = np.arange(len(true_labels))\n ytick_marks = np.arange(len(pred_labels))\n plt.xticks(xtick_marks, true_labels, rotation=90)\n plt.yticks(ytick_marks,pred_labels)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=14)\n plt.xlabel('Predicted label', fontsize=14)\n plt.tight_layout()\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 5\n fig_size[1] = 5\n plt.rcParams[\"figure.figsize\"] = fig_size",
"_____no_output_____"
],
[
"print(\"Accuracy: {0:0.1f}%\".format(accuracy_score(actual_y,pred_y)*100))\nplot_confusion_matrix(actual_y,pred_y)",
"Accuracy: 47.0%\n"
],
[
"data['quality'].value_counts()",
"_____no_output_____"
],
[
"data['quality'].value_counts().plot.pie(subplots=False)\nplt.show()",
"_____no_output_____"
]
],
[
[
"It seems that the classifier has assigned most data points to the class 5 and 6, since these two took up 75% of all classes. :(",
"_____no_output_____"
]
],
[
[
"alist = ['a1', 'a2', 'a3']\nblist = ['b1', 'b2', 'b3']",
"_____no_output_____"
],
[
"print(set(zip(alist, blist)))",
"{('a2', 'b2'), ('a3', 'b3'), ('a1', 'b1')}\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e750c64b5f9d51bbf315313691cd2999c1dee831 | 13,569 | ipynb | Jupyter Notebook | Software_Engineering_Practices/magic_methods.ipynb | tthoraldson/MachineLearningNanodegree | a6a556993ac034f66bd37f03d60cd44bd26512c4 | [
"FTL"
] | null | null | null | Software_Engineering_Practices/magic_methods.ipynb | tthoraldson/MachineLearningNanodegree | a6a556993ac034f66bd37f03d60cd44bd26512c4 | [
"FTL"
] | null | null | null | Software_Engineering_Practices/magic_methods.ipynb | tthoraldson/MachineLearningNanodegree | a6a556993ac034f66bd37f03d60cd44bd26512c4 | [
"FTL"
] | null | null | null | 38.991379 | 308 | 0.489793 | [
[
[
"# Magic Methods\n\nBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.\n\nAs in previous exercises, there is an answer key that you can look at if you get stuck. Click on the \"Jupyter\" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.",
"_____no_output_____"
]
],
[
[
"import math\nimport matplotlib.pyplot as plt\n\nclass Gaussian():\n \"\"\" Gaussian distribution class for calculating and \n visualizing a Gaussian distribution.\n \n Attributes:\n mean (float) representing the mean value of the distribution\n stdev (float) representing the standard deviation of the distribution\n data_list (list of floats) a list of floats extracted from the data file\n \n \"\"\"\n def __init__(self, mu = 0, sigma = 1):\n \n self.mean = mu\n self.stdev = sigma\n self.data = []\n\n\n \n def calculate_mean(self):\n \n \"\"\"Method to calculate the mean of the data set.\n \n Args: \n None\n \n Returns: \n float: mean of the data set\n \n \"\"\"\n \n #TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data\n # Change the value of the mean attribute to be the mean of the data set\n # Return the mean of the data set \n self.mean = 1.0 * sum(self.data) / len(self.data)\n return self.mean\n \n\n\n def calculate_stdev(self, sample=True):\n\n \"\"\"Method to calculate the standard deviation of the data set.\n \n Args: \n sample (bool): whether the data represents a sample or population\n \n Returns: \n float: standard deviation of the data set\n \n \"\"\"\n\n # TODO:\n # Calculate the standard deviation of the data set\n # \n # The sample variable determines if the data set contains a sample or a population\n # If sample = True, this means the data is a sample. \n # Keep the value of sample in mind for calculating the standard deviation\n #\n # Make sure to update self.stdev and return the standard deviation as well \n \n if sample:\n n = len(self.data) - 1\n else:\n n = len(self.data)\n \n mean = self.mean\n \n sigma = 0\n for d in self.data:\n sigma += (d - mean) ** 2\n \n sigma = math.sqrt(sigma / n)\n self.stdev = sigma\n return self.stdev\n \n\n def read_data_file(self, file_name, sample=True):\n \n \"\"\"Method to read in data from a txt file. The txt file should have\n one number (float) per line. The numbers are stored in the data attribute. \n After reading in the file, the mean and standard deviation are calculated\n \n Args:\n file_name (string): name of a file to read from\n \n Returns:\n None\n \n \"\"\"\n \n # This code opens a data file and appends the data to a list called data_list\n with open(file_name) as file:\n data_list = []\n line = file.readline()\n while line:\n data_list.append(int(line))\n line = file.readline()\n file.close()\n \n # TODO: \n # Update the self.data attribute with the data_list\n # Update self.mean with the mean of the data_list. \n # You can use the calculate_mean() method with self.calculate_mean()\n # Update self.stdev with the standard deviation of the data_list. 
Use the \n # calcaulte_stdev() method.\n self.data = data_list\n self.mean = self.calculate_mean()\n self.stdev = self.calculate_stdev(sample)\n \n \n def plot_histogram(self):\n \"\"\"Method to output a histogram of the instance variable data using \n matplotlib pyplot library.\n \n Args:\n None\n \n Returns:\n None\n \"\"\"\n \n # TODO: Plot a histogram of the data_list using the matplotlib package.\n # Be sure to label the x and y axes and also give the chart a title\n plt.hist(self.data)\n plt.title('Histogram')\n plt.xlabel('data')\n plt.ylabel('count')\n \n \n \n def pdf(self, x):\n \"\"\"Probability density function calculator for the gaussian distribution.\n \n Args:\n x (float): point for calculating the probability density function\n \n \n Returns:\n float: probability density function output\n \"\"\"\n \n # TODO: Calculate the probability density function of the Gaussian distribution\n # at the value x. You'll need to use self.stdev and self.mean to do the calculation\n return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2) \n\n def plot_histogram_pdf(self, n_spaces = 50):\n\n \"\"\"Method to plot the normalized histogram of the data and a plot of the \n probability density function along the same range\n \n Args:\n n_spaces (int): number of data points \n \n Returns:\n list: x values for the pdf plot\n list: y values for the pdf plot\n \n \"\"\"\n \n #TODO: Nothing to do for this method. Try it out and see how it works.\n \n mu = self.mean\n sigma = self.stdev\n\n min_range = min(self.data)\n max_range = max(self.data)\n \n # calculates the interval between x values\n interval = 1.0 * (max_range - min_range) / n_spaces\n\n x = []\n y = []\n \n # calculate the x values to visualize\n for i in range(n_spaces):\n tmp = min_range + interval*i\n x.append(tmp)\n y.append(self.pdf(tmp))\n\n # make the plots\n fig, axes = plt.subplots(2,sharex=True)\n fig.subplots_adjust(hspace=.5)\n axes[0].hist(self.data, density=True)\n axes[0].set_title('Normed Histogram of Data')\n axes[0].set_ylabel('Density')\n\n axes[1].plot(x, y)\n axes[1].set_title('Normal Distribution for \\n Sample Mean and Sample Standard Deviation')\n axes[0].set_ylabel('Density')\n plt.show()\n\n return x, y\n\n def __add__(self, other):\n \n \"\"\"Magic method to add together two Gaussian distributions\n \n Args:\n other (Gaussian): Gaussian instance\n \n Returns:\n Gaussian: Gaussian distribution\n \n \"\"\"\n \n # TODO: Calculate the results of summing two Gaussian distributions\n # When summing two Gaussian distributions, the mean value is the sum\n # of the means of each Gaussian.\n #\n # When summing two Gaussian distributions, the standard deviation is the\n # square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)\n \n # create a new Gaussian object\n result = Gaussian()\n \n # TODO: calculate the mean and standard deviation of the sum of two Gaussians\n result.mean = self.mean + other.mean\n result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)\n \n return result\n\n def __repr__(self):\n \n \"\"\"Magic method to output the characteristics of the Gaussian instance\n \n Args:\n None\n \n Returns:\n string: characteristics of the Gaussian\n \n \"\"\"\n \n # TODO: Return a string in the following format - \n # \"mean mean_value, standard deviation standard_deviation_value\"\n # where mean_value is the mean of the Gaussian distribution\n # and standard_deviation_value is the standard deviation of\n # the Gaussian.\n # For example \"mean 3.5, 
standard deviation 1.3\"\n \n return \"mean: {}, standard deviation: {}\".format(self.mean, self.stdev)",
"_____no_output_____"
],
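[
"# (Added usage sketch, not part of the exercise; values are illustrative.)\n# A quick manual check of the two magic methods before running the unit tests below.\ng1 = Gaussian(25, 3)\ng2 = Gaussian(30, 4)\nprint(g1 + g2)   # mean: 55, standard deviation: 5.0\nprint(repr(g1))  # mean: 25, standard deviation: 3",
"_____no_output_____"
],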
[
"# Unit tests to check your solution\n\nimport unittest\n\nclass TestGaussianClass(unittest.TestCase):\n def setUp(self):\n self.gaussian = Gaussian(25, 2)\n\n def test_initialization(self): \n self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')\n self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')\n\n def test_pdf(self):\n self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\\\n 'pdf function does not give expected result') \n\n def test_meancalculation(self):\n self.gaussian.read_data_file('numbers.txt', True)\n self.assertEqual(self.gaussian.calculate_mean(),\\\n sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')\n\n def test_stdevcalculation(self):\n self.gaussian.read_data_file('numbers.txt', True)\n self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')\n self.gaussian.read_data_file('numbers.txt', False)\n self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')\n\n def test_add(self):\n gaussian_one = Gaussian(25, 3)\n gaussian_two = Gaussian(30, 4)\n gaussian_sum = gaussian_one + gaussian_two\n \n self.assertEqual(gaussian_sum.mean, 55)\n self.assertEqual(gaussian_sum.stdev, 5)\n\n def test_repr(self):\n gaussian_one = Gaussian(25, 3)\n \n self.assertEqual(str(gaussian_one), \"mean: 25, standard deviation: 3\")\n \ntests = TestGaussianClass()\n\ntests_loaded = unittest.TestLoader().loadTestsFromModule(tests)\n\nunittest.TextTestRunner().run(tests_loaded)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e750cf65df611c907553b8a0f9695549e1c7b534 | 59,908 | ipynb | Jupyter Notebook | notebooks/FastTreeSHAP_Census_Income.ipynb | linkedin/FastTreeSHAP | 58e8288553f739da31ba36891d7957367434d471 | [
"BSD-2-Clause"
] | 261 | 2022-02-09T05:48:01.000Z | 2022-03-31T19:42:00.000Z | notebooks/FastTreeSHAP_Census_Income.ipynb | linkedin/FastTreeSHAP | 58e8288553f739da31ba36891d7957367434d471 | [
"BSD-2-Clause"
] | 4 | 2022-03-22T16:56:29.000Z | 2022-03-30T06:07:32.000Z | notebooks/FastTreeSHAP_Census_Income.ipynb | linkedin/FastTreeSHAP | 58e8288553f739da31ba36891d7957367434d471 | [
"BSD-2-Clause"
] | 18 | 2022-02-09T05:51:01.000Z | 2022-03-30T04:44:36.000Z | 36.396112 | 1,064 | 0.629382 | [
[
[
"# FastTreeSHAP in Census Income Data",
"_____no_output_____"
],
[
"This notebook contains usages and detailed comparisons of FastTreeSHAP v1, FastTreeSHAP v2 and the original TreeSHAP in **binary classification** problems using scikit-learn, XGBoost and LightGBM. It also contains the discussions of automatic algorithm selection. It may take a few minutes to run through all code in this notebook. The source of census income data is https://archive.ics.uci.edu/ml/datasets/census+income.",
"_____no_output_____"
],
[
"## Load Python libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score, accuracy_score\nimport xgboost as xgb\nimport lightgbm as lgb\nimport fasttreeshap\nimport os\nimport time",
"_____no_output_____"
]
],
[
[
"## Pre-process training and testing data",
"_____no_output_____"
]
],
[
[
"# source of data: https://archive.ics.uci.edu/ml/datasets/census+income\ntrain = pd.read_csv(\"../data/adult_data.txt\", sep = \",\\s+\", header = None, engine = \"python\")\ntest = pd.read_csv(\"../data/adult_test.txt\", sep = \",\\s+\", header = None, skiprows = 1, engine = \"python\")\nlabel_train = train[14].map({\"<=50K\": 0, \">50K\": 1}).tolist()\nlabel_test = test[14].map({\"<=50K.\": 0, \">50K.\": 1}).tolist()\ntrain = train.iloc[:, :-2]\ntest = test.iloc[:, :-2]\n\n# one-hot-encoding on categorical features\nfeature_names = [\"age\", \"workclass\", \"fnlwgt\", \"education\", \"education-num\", \"marital-status\", \"occupation\", \n \"relationship\", \"race\", \"sex\", \"capital-gain\", \"capital-loss\", \"hours-per-week\"]\ntrain.columns = feature_names\ntest.columns = feature_names\ncategorical_feature_names = [\"workclass\", \"education\", \"marital-status\", \"occupation\", \"relationship\", \"race\", \"sex\"]\ndef dummy_transform(df):\n for name in categorical_feature_names:\n dummy_df = pd.get_dummies(df[name])\n if \"?\" in dummy_df.columns.values:\n dummy_df.drop(\"?\", axis=1, inplace=True)\n df = pd.concat([df, dummy_df], axis=1)\n df.drop(name, axis=1, inplace=True)\n return df\ntrain = dummy_transform(train)\ntest = dummy_transform(test)\nprint(\"Training data has {} rows and {} columns.\".format(train.shape[0], train.shape[1])) \nprint(\"Testing data has {} rows and {} columns.\".format(test.shape[0], test.shape[1])) ",
"_____no_output_____"
]
],
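[
[
"# (Added illustration of the one-hot encoding step above; toy values only.)\n# pd.get_dummies is what dummy_transform applies per categorical column; the resulting\n# question-mark column (missing values) is dropped inside dummy_transform.\ntoy = pd.Series(['Private', '?', 'State-gov'], name='workclass')\nprint(pd.get_dummies(toy))",
"_____no_output_____"
]
],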
[
[
"## Train a random forest model using scikit-learn and compute SHAP values",
"_____no_output_____"
]
],
[
[
"n_estimators = 200 # number of trees in random forest model\nmax_depth = 8 # maximum depth of any trees in random forest model",
"_____no_output_____"
],
[
"# train a random forest model\nrf_model = RandomForestClassifier(n_estimators = n_estimators, max_depth = max_depth, random_state = 0)\nrf_model.fit(train, label_train)\nprint(\"AUC on testing set is {:.2f}.\".format(roc_auc_score(label_test, rf_model.predict_proba(test)[:, 1])))\nprint(\"Accuracy on testing set is {:.2f}.\".format(accuracy_score(label_test, rf_model.predict(test))))",
"_____no_output_____"
],
[
"# obtain total number of leaves\nshap_explainer = fasttreeshap.TreeExplainer(rf_model)\nnum_leaves = sum(shap_explainer.model.num_nodes) - sum(sum(shap_explainer.model.children_left > 0))\nprint(\"Total number of leaves is {}.\".format(num_leaves))",
"_____no_output_____"
],
[
"# estimate memory usage of FastTreeSHAP v2 since FastTreeSHAP v2 has a stricter memory constraint than\n# TreeSHAP and FastTreeSHAP v1\n# derivation of the memory estimation can be found in Deep Dive Section in this notebook\ndef memory_estimate_v2(shap_explainer, num_sample, num_feature, n_jobs):\n max_node = max(shap_explainer.model.num_nodes)\n max_leaves = (max_node + 1) // 2\n max_combinations = 2**int(shap_explainer.model.max_depth)\n phi_dim = num_sample * (num_feature + 1) * shap_explainer.model.num_outputs\n n_jobs = os.cpu_count() if n_jobs == -1 else n_jobs\n memory_1 = (max_leaves * max_combinations + phi_dim) * 8 * n_jobs\n memory_2 = max_leaves * max_combinations * shap_explainer.model.values.shape[0] * 8\n memory = min(memory_1, memory_2)\n if memory < 1024:\n print(\"Memory usage of FastTreeSHAP v2 is around {:.2f}B.\".format(memory))\n elif memory / 1024 < 1024:\n print(\"Memory usage of FastTreeSHAP v2 is around {:.2f}KB.\".format(memory / 1024))\n elif memory / 1024**2 < 1024:\n print(\"Memory usage of FastTreeSHAP v2 is around {:.2f}MB.\".format(memory / 1024**2))\n else:\n print(\"Memory usage of FastTreeSHAP v2 is around {:.2f}GB.\".format(memory / 1024**3))",
"_____no_output_____"
]
],
[
[
"### Compute SHAP values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 10000 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v0\", n_jobs = n_jobs)\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v0.shape",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v1\", n_jobs = n_jobs)\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v1 - shap_values_v0))))",
"_____no_output_____"
],
[
"# estimate memory usage of FastTreeSHAP v2 since FastTreeSHAP v2 has a stricter memory constraint than\n# TreeSHAP and FastTreeSHAP v1\nmemory_estimate_v2(shap_explainer, num_sample, test.shape[1], n_jobs)",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v2\", n_jobs = n_jobs)\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v2.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v2\nprint(\"Maximum difference of SHAP values between v2 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v2 - shap_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"auto\", n_jobs = n_jobs)\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values\nshap_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP values",
"_____no_output_____"
]
],
[
[
"# compute SHAP values/SHAP interaction values via TreeSHAP algorithm with version \"algorithm_version\"\n# (parallel on \"n_jobs\" threads)\ndef run_fasttreeshap(model, sample, interactions, algorithm_version, n_jobs, num_round, num_sample, shortcut = False):\n shap_explainer = fasttreeshap.TreeExplainer(\n model, algorithm = algorithm_version, n_jobs = n_jobs, shortcut = shortcut)\n run_time = np.zeros(num_round)\n for i in range(num_round):\n start = time.time()\n shap_values = shap_explainer(sample.iloc[:num_sample], interactions = interactions).values\n run_time[i] = time.time() - start\n print(\"Round {} takes {:.3f} sec.\".format(i + 1, run_time[i]))\n print(\"Average running time of {} is {:.3f} sec (std {:.3f} sec){}.\".format(\n algorithm_version, np.mean(run_time), np.std(run_time), \" (with shortcut)\" if shortcut else \"\"))",
"_____no_output_____"
],
[
"num_sample = 10000 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = False, algorithm_version = \"v0\", n_jobs = n_jobs,\n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = False, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v2 multiple times and record its average running time\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = False, algorithm_version = \"v2\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = False, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
]
],
[
[
"### Compute SHAP interaction values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v0 (i.e., original TreeSHAP)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v0\", n_jobs = n_jobs)\nshap_interaction_values_v0 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v0.shape",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v1\", n_jobs = n_jobs)\nshap_interaction_values_v1 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP interaction values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_v1 - shap_interaction_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP interaction values via automatic TreeSHAP algorithm selection\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"auto\", n_jobs = n_jobs)\nshap_interaction_values_auto = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\nprint(\"Maximum difference of SHAP interaction values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_auto - shap_interaction_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP interaction values",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = True, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = True, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nrun_fasttreeshap(\n model = rf_model, sample = test, interactions = True, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample)",
"_____no_output_____"
]
],
[
[
"## Train an XGBoost model and compute SHAP values",
"_____no_output_____"
]
],
[
[
"n_estimators = 200 # number of trees in XGBoost model\nmax_depth = 8 # maximum depth of any trees in XGBoost model",
"_____no_output_____"
],
[
"# train an XGBoost model\nxgb_model = xgb.XGBClassifier(\n max_depth = max_depth, n_estimators = n_estimators, learning_rate = 0.1, n_jobs = -1,\n use_label_encoder = False, eval_metric = \"logloss\", random_state = 0)\nxgb_model.fit(train, label_train)\nprint(\"AUC on testing set is {:.2f}.\".format(roc_auc_score(label_test, xgb_model.predict_proba(test)[:, 1])))\nprint(\"Accuracy on testing set is {:.2f}.\".format(accuracy_score(label_test, xgb_model.predict(test))))",
"_____no_output_____"
],
[
"# obtain total number of leaves\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model)\nnum_leaves = sum(shap_explainer.model.num_nodes) - sum(sum(shap_explainer.model.children_left > 0))\nprint(\"Total number of leaves is {}.\".format(num_leaves))",
"_____no_output_____"
]
],
[
[
"### Compute SHAP values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 10000 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP values via \"shortcut\" (i.e., original TreeSHAP in XGBoost package)\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = True)\nshap_values_shortcut = shap_explainer(test.iloc[:num_sample]).values\nshap_values_shortcut.shape",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP in SHAP package)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = False)\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v0.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v0\nprint(\"Mean and maximum differences of SHAP values between v0 and shortcut is {:.2e} and {:.2e}.\".format(\n np.mean(abs(shap_values_v0 - shap_values_shortcut)), np.max(abs(shap_values_v0 - shap_values_shortcut))))",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v1\", n_jobs = n_jobs, shortcut = False)\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v1 - shap_values_v0))))",
"_____no_output_____"
],
[
"# estimate memory usage of FastTreeSHAP v2 since FastTreeSHAP v2 has a stricter memory constraint than\n# TreeSHAP and FastTreeSHAP v1\nmemory_estimate_v2(shap_explainer, num_sample, test.shape[1], n_jobs)",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v2\", n_jobs = n_jobs, shortcut = False)\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v2.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v2\nprint(\"Maximum difference of SHAP values between v2 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v2 - shap_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"auto\", n_jobs = n_jobs, shortcut = False)\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values\nshap_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP values",
"_____no_output_____"
]
],
[
[
"num_sample = 10000 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run \"shortcut\" version of TreeSHAP multiple times and record its average running time\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = False, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = True)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = False, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = False, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v2 multiple times and record its average running time\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = False, algorithm_version = \"v2\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = False, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
]
],
[
[
"### Compute SHAP interaction values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP interaction values via \"shortcut\" (i.e., original TreeSHAP in XGBoost package)\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = True)\nshap_interaction_values_shortcut = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_shortcut.shape",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v0 (i.e., original TreeSHAP in SHAP package)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_v0 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v0.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v0\nprint(\"Mean and maximum differences of SHAP values between v0 and shortcut is {:.2e} and {:.2e}.\".format(\n np.mean(abs(shap_interaction_values_v0 - shap_interaction_values_shortcut)), \n np.max(abs(shap_interaction_values_v0 - shap_interaction_values_shortcut))))",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"v1\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_v1 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP interaction values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_v1 - shap_interaction_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP interaction values via automatic TreeSHAP algorithm selection\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nshap_explainer = fasttreeshap.TreeExplainer(xgb_model, algorithm = \"auto\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_auto = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\nprint(\"Maximum difference of SHAP interaction values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_auto - shap_interaction_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP interaction values",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run \"shortcut\" version of TreeSHAP multiple times and record its average running time\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = True, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = True)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = True, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = True, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nrun_fasttreeshap(\n model = xgb_model, sample = test, interactions = True, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
]
],
[
[
"## Train a LightGBM model and compute SHAP values",
"_____no_output_____"
]
],
[
[
"n_estimators = 500 # number of trees in LightGBM model\nmax_depth = 8 # maximum depth of any trees in LightGBM model",
"_____no_output_____"
],
[
"# train a LightGBM model\nlgb_model = lgb.LGBMClassifier(\n max_depth = max_depth, n_estimators = n_estimators, learning_rate = 0.1, n_jobs = -1, random_state = 0)\nlgb_model.fit(train, label_train)\nprint(\"AUC on testing set is {:.2f}.\".format(roc_auc_score(label_test, lgb_model.predict_proba(test)[:, 1])))\nprint(\"Accuracy on testing set is {:.2f}.\".format(accuracy_score(label_test, lgb_model.predict(test))))",
"_____no_output_____"
],
[
"# obtain total number of leaves\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model)\nnum_leaves = sum(shap_explainer.model.num_nodes) - sum(sum(shap_explainer.model.children_left > 0))\nprint(\"Total number of leaves is {}.\".format(num_leaves))",
"_____no_output_____"
]
],
[
[
"### Compute SHAP values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 10000 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP values via \"shortcut\" (i.e., original TreeSHAP in LightGBM package)\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = True)\nshap_values_shortcut = shap_explainer(test.iloc[:num_sample]).values\nshap_values_shortcut.shape",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP in SHAP package)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = False)\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v0.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v0\nprint(\"Mean and maximum differences of SHAP values between v0 and shortcut is {:.2e} and {:.2e}.\".format(\n np.mean(abs(shap_values_v0 - shap_values_shortcut[:,:,1])),\n np.max(abs(shap_values_v0 - shap_values_shortcut[:,:,1]))))",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v1\", n_jobs = n_jobs, shortcut = False)\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v1 - shap_values_v0))))",
"_____no_output_____"
],
[
"# estimate memory usage of FastTreeSHAP v2 since FastTreeSHAP v2 has a stricter memory constraint than\n# TreeSHAP and FastTreeSHAP v1\nmemory_estimate_v2(shap_explainer, num_sample, test.shape[1], n_jobs)",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v2\", n_jobs = n_jobs, shortcut = False)\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\nshap_values_v2.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v2\nprint(\"Maximum difference of SHAP values between v2 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v2 - shap_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"auto\", n_jobs = n_jobs, shortcut = False)\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values\nshap_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP values",
"_____no_output_____"
]
],
[
[
"num_sample = 10000 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run \"shortcut\" version of TreeSHAP multiple times and record its average running time\n# by default, parallel computing on all available cores is enabled in \"shortcut\"\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = False, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = True)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = False, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = False, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v2 multiple times and record its average running time\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = False, algorithm_version = \"v2\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# it turns out that \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = False, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
]
],
[
[
"### Compute SHAP interaction values via different versions of TreeSHAP",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v0 (i.e., original TreeSHAP in SHAP package)\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\n# \"shortcut\" of SHAP interaction values is not enabled for LightGBM in SHAP package\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v0\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_v0 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v0.shape",
"_____no_output_____"
],
[
"# compute SHAP interaction values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"v1\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_v1 = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_v1.shape",
"_____no_output_____"
],
[
"# justify the correctness of FastTreeSHAP v1\nprint(\"Maximum difference of SHAP interaction values between v1 and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_v1 - shap_interaction_values_v0))))",
"_____no_output_____"
],
[
"# compute SHAP interaction values via automatic TreeSHAP algorithm selection\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nshap_explainer = fasttreeshap.TreeExplainer(lgb_model, algorithm = \"auto\", n_jobs = n_jobs, shortcut = False)\nshap_interaction_values_auto = shap_explainer(test.iloc[:num_sample], interactions = True).values\nshap_interaction_values_auto.shape",
"_____no_output_____"
],
[
"# justify the correctness of automatically selected TreeSHAP algorithm\nprint(\"Maximum difference of SHAP interaction values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_interaction_values_auto - shap_interaction_values_v0))))",
"_____no_output_____"
]
],
[
[
"### Compare running times of different versions of TreeSHAP in computing SHAP interaction values",
"_____no_output_____"
]
],
[
[
"num_sample = 100 # number of samples to be explained\nnum_round = 3 # number of rounds to record mean and standard deviation of running time\nn_jobs = -1 # number of parallel threads (-1 means utilizing all available cores)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v0 (i.e., original TreeSHAP) multiple times and record its average running time\n# parallel computing is not enabled in original TreeSHAP in SHAP package, but here we enable it for a fair comparison\n# on execution time\n# \"shortcut\" of SHAP interaction values is not enabled for LightGBM in SHAP package\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = True, algorithm_version = \"v0\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run FastTreeSHAP v1 multiple times and record its average running time\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = True, algorithm_version = \"v1\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
],
[
"# run automatically selected TreeSHAP algorithm multiple times and record its average running time\n# v1 is always preferred to v0 in any use cases, and v2 does not support interactions\nrun_fasttreeshap(\n model = lgb_model, sample = test, interactions = True, algorithm_version = \"auto\", n_jobs = n_jobs, \n num_round = num_round, num_sample = num_sample, shortcut = False)",
"_____no_output_____"
]
],
[
[
"## Deep dive into automatic algorithm selection",
"_____no_output_____"
],
[
"The default value of the argument `algorithm` in the class `TreeExplainer` is `auto`, indicating that the TreeSHAP algorithm is automatically selected from `\"v0\"`, `\"v1\"` and `\"v2\"` according to the number of samples to be explained and the constraint on the allocated memory.",
"_____no_output_____"
],
[
"Specifically, `\"v1\"` is always perferred to `\"v0\"` in any use cases, and `\"v2\"` is perferred to `\"v1\"` when the number of samples to be explained is sufficiently large: <img src=\"https://latex.codecogs.com/svg.latex?M>2^{D+1}/D,\"/> and the memory constraint is also satisfied: <img src=\"https://latex.codecogs.com/svg.latex?min\\{(MN+L2^D)\\cdot C,\\;TL2^D\\}\\cdot8Byte<0.25\\cdot Total\\,Memory.\"/> Here *M* is the number of samples to be explained, *D* is the maximum depth of any tree, *N* is the number of features, *L* is the maximum number of leaves in any tree, *T* is the number of trees, and *C* is the number of parallel threads.",
"_____no_output_____"
],
[
"The derivation of the first criterion (threshold on number of samples) can be found in [FastTreeSHAP](https://arxiv.org/abs/2109.09847) paper. The derivation of the second criterion (memory constraint) is based on the implementation of parallel computing for FastTreeSHAP v2: Two versions of parallel computing have been implemented. Version 1 builds a parallel for-loop over all trees, which requires <img src=\"https://latex.codecogs.com/svg.latex?(MN+L2^D)\\cdot C\\cdot8Byte\"/> memory allocation (each thread has its own matrices to store both SHAP values and pre-computed values). Version 2 builds two consecutive parallel for-loops over all trees and over all samples respectively, which requires <img src=\"https://latex.codecogs.com/svg.latex?TL2^D\\cdot8Byte\"/> memory allocation (first parallel for-loop stores pre-computed values across all trees). In FastTreeSHAP package, based on empirical results, version 1 has a higher priority than version 2, i.e., version 1 will be selected if both version 1 and version 2 satisfy the memory constraint.",
"_____no_output_____"
],
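[
"A minimal worked example of these two criteria, using purely illustrative values for *M*, *D*, *N*, *L*, *T*, *C* and the available memory (these numbers are assumptions for this sketch, not values taken from the notebook):\n\n```python\nM, D, N, L, T, C = 10000, 8, 30, 256, 500, 8             # samples, max depth, features, max leaves, trees, threads\ntotal_memory = 16 * 2**30                                 # assume 16 GB of RAM, in bytes\nsample_criterion = M > 2**(D + 1) / D                     # threshold on the number of samples to be explained\nmemory_needed = min((M * N + L * 2**D) * C, T * L * 2**D) * 8   # bytes, per the memory constraint above\nmemory_criterion = memory_needed < 0.25 * total_memory\nprint(sample_criterion and memory_criterion)              # True -> \"auto\" would pick v2, otherwise v1\n```",
"_____no_output_____"
],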
[
"### Automatic algorithm selection in moderate models with varying number of samples to be explained",
"_____no_output_____"
],
[
"In moderate models (i.e., memory constraint is not a big concern), `\"auto\"` selects `\"v2\"` when the number of samples to be explained exceeds a threshold as defined above, and selects `\"v1\"` otherwise.",
"_____no_output_____"
]
],
[
[
"n_estimators = 200 # number of trees in random forest model\nmax_depth = 8 # maximum depth of any trees in random forest model",
"_____no_output_____"
],
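[
"# a quick worked check of the sample-count criterion described above (added sketch, not in the original notebook)\n# with max_depth = 8 the threshold is 2**(8 + 1) / 8 = 64 samples, so explaining 100 samples\n# should lead \"auto\" to select v2 while explaining 50 samples should lead it to select v1\nsample_threshold = 2**(max_depth + 1) / max_depth\nprint(\"v2 is preferred once more than {:.0f} samples are explained.\".format(sample_threshold))",
"_____no_output_____"
],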
[
"# train a random forest model\nrf_model = RandomForestClassifier(n_estimators = n_estimators, max_depth = max_depth, random_state = 0)\nrf_model.fit(train, label_train)",
"_____no_output_____"
],
[
"# estimated memory usage of FastTreeSHAP v2 shows that memory constraint is not a big concern\n# even for as many as 100,000 samples and parallel computing on all available cores\nshap_explainer = fasttreeshap.TreeExplainer(rf_model)\nmemory_estimate_v2(shap_explainer, num_sample = 100000, num_feature = test.shape[1], n_jobs = -1)",
"_____no_output_____"
]
],
[
[
"When number of samples to be explained is 100, `\"auto\"` selects `\"v2\"` as the most appropriate TreeSHAP algorithm.",
"_____no_output_____"
]
],
[
[
"# number of samples to be explained\nnum_sample = 100",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP)\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v0\")\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v1\")\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v2\")\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"auto\")\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values",
"_____no_output_____"
],
[
"# \"auto\" selects \"v2\" as the most appropriate TreeSHAP algorithm when number of samples is 100\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))\nprint(\"Maximum difference of SHAP values between auto and v1 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v1))))\nprint(\"Maximum difference of SHAP values between auto and v2 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v2))))",
"_____no_output_____"
]
],
[
[
"When number of samples to be explained is 50, `\"auto\"` selects `\"v1\"` as the most appropriate TreeSHAP algorithm.",
"_____no_output_____"
]
],
[
[
"# number of samples to be explained\nnum_sample = 50",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP)\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v0\")\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v1\")\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v2\")\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"auto\")\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values",
"_____no_output_____"
],
[
"# \"auto\" selects \"v1\" as the most appropriate TreeSHAP algorithm when number of samples is 50\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))\nprint(\"Maximum difference of SHAP values between auto and v1 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v1))))\nprint(\"Maximum difference of SHAP values between auto and v2 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v2))))",
"_____no_output_____"
]
],
[
[
"### Automatic algorithm selection in very large models",
"_____no_output_____"
],
[
"In very large models, `\"auto\"` selects `\"v1\"` instead of `\"v2\"` when the potential memory risk is detected.",
"_____no_output_____"
]
],
[
[
"n_estimators = 200 # number of trees in random forest model\nmax_depth = 20 # maximum depth of any trees in random forest model",
"_____no_output_____"
],
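[
"# rough sense of scale for the memory criterion discussed above (added sketch, not in the original notebook)\n# both memory terms in the criterion contain a 2**D factor, so with max_depth = 20 each leaf alone\n# accounts for 2**20 * 8 bytes = 8 MiB of pre-computed values in v2\nprint(\"2**D = {}\".format(2**max_depth))\nprint(\"per-leaf allocation: {:.0f} MiB\".format(2**max_depth * 8 / 2**20))",
"_____no_output_____"
],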
[
"# train a random forest model\nrf_model = RandomForestClassifier(n_estimators = n_estimators, max_depth = max_depth, random_state = 0)\nrf_model.fit(train, label_train)",
"_____no_output_____"
],
[
"# estimated memory usage of FastTreeSHAP v2 shows a potential memory risk\n# even for only 10 samples and no parallel computing\nshap_explainer = fasttreeshap.TreeExplainer(rf_model)\nmemory_estimate_v2(shap_explainer, num_sample = 10, num_feature = test.shape[1], n_jobs = 1)",
"_____no_output_____"
],
[
"# number of samples to be explained\nnum_sample = 10",
"_____no_output_____"
],
[
"# compute SHAP values via FastTreeSHAP v0 (i.e., original TreeSHAP)\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v0\")\nshap_values_v0 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v1\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v1\")\nshap_values_v1 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via FastTreeSHAP v2\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"v2\")\nshap_values_v2 = shap_explainer(test.iloc[:num_sample]).values\n\n# compute SHAP values via automatic TreeSHAP algorithm selection\nshap_explainer = fasttreeshap.TreeExplainer(rf_model, algorithm = \"auto\")\nshap_values_auto = shap_explainer(test.iloc[:num_sample]).values",
"_____no_output_____"
],
[
"# \"v2\" is automatically switched to \"v1\" as potential memory risk is detected\nprint(\"Maximum difference of SHAP values between v2 and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_v2 - shap_values_v0))))\nprint(\"Maximum difference of SHAP values between v2 and v1 is {:.2e}.\".format(\n np.max(abs(shap_values_v2 - shap_values_v1))))",
"_____no_output_____"
],
[
"# \"auto\" selects \"v1\" as the most appropriate TreeSHAP algorithm as potential memory risk is detected\nprint(\"Maximum difference of SHAP values between auto and v0 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v0))))\nprint(\"Maximum difference of SHAP values between auto and v1 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v1))))\nprint(\"Maximum difference of SHAP values between auto and v2 is {:.2e}.\".format(\n np.max(abs(shap_values_auto - shap_values_v2))))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e750d1d6a25adc1af40f2db56cecf5b3255a31a9 | 3,728 | ipynb | Jupyter Notebook | Create Sorted Graph.ipynb | AhmadTaha96/Facebook-Friendship-Prediction | be7b2aec0115df971284edd20d9d7b945b82cf68 | [
"BSL-1.0"
] | null | null | null | Create Sorted Graph.ipynb | AhmadTaha96/Facebook-Friendship-Prediction | be7b2aec0115df971284edd20d9d7b945b82cf68 | [
"BSL-1.0"
] | null | null | null | Create Sorted Graph.ipynb | AhmadTaha96/Facebook-Friendship-Prediction | be7b2aec0115df971284edd20d9d7b945b82cf68 | [
"BSL-1.0"
] | null | null | null | 28.899225 | 288 | 0.515826 | [
[
[
"# Importing Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport shutil\nimport os\nimport networkx as nx",
"_____no_output_____"
]
],
[
[
"# Create Sorted Graph",
"_____no_output_____"
],
[
"We are gonna now design features based on geometric information in the graph we have, what we call embedding for each node in the graph give it's neighborhood nodes to get it's place inside the graph.",
"_____no_output_____"
]
],
[
[
"train_graph = nx.read_edgelist(\"Data/train graph.csv\", comments = 's', create_using = nx.DiGraph(), nodetype = int, delimiter = \",\")",
"_____no_output_____"
]
],
[
[
"Now as we are gonint to use KarateClub library to import the algorithm specifically (NetMF Embedding ALgortihm), for thatwe have to modify our graph as KarateClub does not accept scattered graph(unsorted graph based on index) so we have to design new graph given our train graph",
"_____no_output_____"
]
],
[
[
"# to get back from sorted graph to train graph\nbase_nodes = dict()\n# to move from train graph to sorted graph\nreflection = dict()\nin_edges = dict()\nout_edges = dict()\n\nsorted_graph = nx.DiGraph()\n\ni = 0\nfor node in train_graph.nodes():\n base_nodes[i] = node\n reflection[node] = i\n in_edges[i] = train_graph.predecessors(node)\n out_edges[i] = train_graph.successors(node)\n i += 1\n\n# adding nodes to new sorted graph\nfor i in range(1821369):\n sorted_graph.add_node(i)\n\n# adding incoming / outgoing edges for each node in the new graph\nfor i in range(1821369): # number of nodes in the train graph\n \n for incoming in in_edges[i]:\n sorted_graph.add_edge(reflection[incoming], i)\n \n for outcoming in out_edges[i]:\n sorted_graph.add_edge(i, reflection[outcoming])\n\n# saving graph in picke format\nnx.write_gpickle(sorted_graph,\"Data/sorted_graph.gpickle\")\n\n# to read leater\n# sorted_graph = nx.read_gpickle('sorted_graph.gpickle')",
"_____no_output_____"
],
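[
"# (added sketch, not part of the original notebook) one possible way to compute the node embeddings\n# mentioned above with KarateClub's NetMF; `dimensions = 32` is an arbitrary choice, and KarateClub\n# expects an undirected graph with consecutive integer node labels starting at 0, which sorted_graph now has\n# note: on a graph with ~1.8M nodes this step is memory- and time-intensive\nfrom karateclub import NetMF\n\nembedding_model = NetMF(dimensions = 32)\nembedding_model.fit(sorted_graph.to_undirected())\nembeddings = embedding_model.get_embedding()\nembeddings.shape",
"_____no_output_____"
]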
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e750d774021ba0e627af0a3c94b23129e8750855 | 49,404 | ipynb | Jupyter Notebook | Chapter 4/ch4 Affinity Analysis.ipynb | aglaiawong/Learning-Data-Mining-with-Python | 7f92dd83bda1caadd17bfec7a20c461843d38670 | [
"MIT"
] | 175 | 2016-07-14T05:29:27.000Z | 2022-03-30T09:04:29.000Z | Chapter 4/ch4 Affinity Analysis.ipynb | aglaiawong/Learning-Data-Mining-with-Python | 7f92dd83bda1caadd17bfec7a20c461843d38670 | [
"MIT"
] | 1 | 2018-02-07T21:36:42.000Z | 2020-10-19T12:03:57.000Z | Chapter 4/ch4 Affinity Analysis.ipynb | aglaiawong/Learning-Data-Mining-with-Python | 7f92dd83bda1caadd17bfec7a20c461843d38670 | [
"MIT"
] | 161 | 2016-09-09T02:35:43.000Z | 2022-03-29T07:18:15.000Z | 34.213296 | 281 | 0.420249 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e750e145f882ebf604cdd83babcfbc25d83ab6af | 9,711 | ipynb | Jupyter Notebook | CODE/debug/from_pretrained.ipynb | Zaaachary/CSQA | 6da6e076f67e9458deacb665d31463db14c7d860 | [
"BSD-3-Clause"
] | null | null | null | CODE/debug/from_pretrained.ipynb | Zaaachary/CSQA | 6da6e076f67e9458deacb665d31463db14c7d860 | [
"BSD-3-Clause"
] | null | null | null | CODE/debug/from_pretrained.ipynb | Zaaachary/CSQA | 6da6e076f67e9458deacb665d31463db14c7d860 | [
"BSD-3-Clause"
] | null | null | null | 38.082353 | 366 | 0.568634 | [
[
[
"%cd ../",
"d:\\CODE\\Commonsense\\CSQA_dev\\CODE\n"
],
[
"from copy import deepcopy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import AlbertPreTrainedModel, AlbertTokenizer, AlbertConfig\n\nfrom model.AlbertModel import AlbertModel\n",
"_____no_output_____"
],
[
"class AlbertBurger(nn.Module):\n\n def __init__(self, config, **kwargs):\n\n super(AlbertBurger, self).__init__()\n\n albert1_layers = kwargs['albert1_layers']\n\n self.config1 = deepcopy(config)\n self.config1.num_hidden_layers = albert1_layers\n self.config2 = deepcopy(config)\n self.config2.num_hidden_layers = config.num_hidden_layers - albert1_layers\n self.config2.without_embedding = True\n\n self.albert1 = AlbertModel(self.config1)\n self.albert2 = AlbertModel(self.config2)\n\n self.scorer = nn.Sequential(\n nn.Dropout(0.1),\n nn.Linear(config.hidden_size, 1)\n )\n\n self.apply(self.init_weights)\n\n def forward(self, input_ids, attention_mask, token_type_ids, labels=None):\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n \n outputs = self.albert1(input_ids, attention_mask, token_type_ids)\n hidden_state_1 = outputs.last_hidden_state\n outputs = self.albert2(inputs_embeds=hidden_state_1)\n return outputs.last_hidden_state\n\n\n @staticmethod\n def init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, model_path_or_name, **kwargs):\n\n config = AlbertConfig()\n config.without_embedding = False\n if \"xxlarge\" in model_path_or_name:\n config.hidden_size = 4096\n config.intermediate_size = 16384\n config.num_attention_heads = 64\n config.num_hidden_layers = 12\n elif \"xlarge\" in model_path_or_name:\n config.hidden_size = 2048\n config.intermediate_size = 8192\n config.num_attention_heads = 16\n config.num_hidden_layers = 24\n elif \"large\" in model_path_or_name:\n config.hidden_size = 1024\n config.intermediate_size = 4096\n config.num_attention_heads = 16\n config.num_hidden_layers = 24\n elif \"base\" in model_path_or_name:\n config.hidden_size = 768\n config.intermediate_size = 3072\n config.num_attention_heads = 12\n config.num_hidden_layers = 12\n\n model = cls(config, **kwargs)\n model.albert1 = model.albert1.from_pretrained(model_path_or_name, config=model.config1)\n model.albert2 = model.albert2.from_pretrained(model_path_or_name, config=model.config2)\n\n return model\n",
"_____no_output_____"
],
[
"# model = AlbertBurger.from_pretrained(r'D:\\CODE\\Python\\Transformers-Models\\albert-base-v2', albert1_run=6)\nkwargs6 = {'albert1_layers': 6}\nmodel_6 = AlbertBurger.from_pretrained(r'D:\\CODE\\Python\\Transformers-Models\\albert-base-v2', **kwargs6)\n\nkwargs12 = {'albert1_layers': 0}\nmodel_12 = AlbertBurger.from_pretrained(r'D:\\CODE\\Python\\Transformers-Models\\albert-base-v2', **kwargs12)",
"Some weights of the model checkpoint at D:\\CODE\\Python\\Transformers-Models\\albert-base-v2 were not used when initializing AlbertModel: ['albert.embeddings.word_embeddings.weight', 'albert.embeddings.position_embeddings.weight', 'albert.embeddings.token_type_embeddings.weight', 'albert.embeddings.LayerNorm.weight', 'albert.embeddings.LayerNorm.bias']\n- This IS expected if you are initializing AlbertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing AlbertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of the model checkpoint at D:\\CODE\\Python\\Transformers-Models\\albert-base-v2 were not used when initializing AlbertModel: ['albert.embeddings.word_embeddings.weight', 'albert.embeddings.position_embeddings.weight', 'albert.embeddings.token_type_embeddings.weight', 'albert.embeddings.LayerNorm.weight', 'albert.embeddings.LayerNorm.bias']\n- This IS expected if you are initializing AlbertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing AlbertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
],
[
"tokenizer = AlbertTokenizer.from_pretrained(r'D:\\CODE\\Python\\Transformers-Models\\albert-base-v2')\nfeature_dict = tokenizer.batch_encode_plus(['just have a test',], return_tensors='pt')\nfeature_dict",
"_____no_output_____"
],
[
"model_6(**feature_dict)",
"_____no_output_____"
],
[
"model_12(**feature_dict)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e750e14823bb81322f395597f968881ad46c3ef4 | 1,897 | ipynb | Jupyter Notebook | Notebooks/Template.ipynb | sandiegodata/covid19 | fc68f1175079ec987fa33404e972abc0d2c48fa6 | [
"CC0-1.0"
] | 1 | 2020-04-10T21:34:34.000Z | 2020-04-10T21:34:34.000Z | Notebooks/Template.ipynb | sandiegodata/covid19 | fc68f1175079ec987fa33404e972abc0d2c48fa6 | [
"CC0-1.0"
] | null | null | null | Notebooks/Template.ipynb | sandiegodata/covid19 | fc68f1175079ec987fa33404e972abc0d2c48fa6 | [
"CC0-1.0"
] | null | null | null | 18.782178 | 101 | 0.525567 | [
[
[
"show_input: hide\ngithub: \nfeatured_image: 189\nauthors:\n- email: [email protected]\n name: Eric Busboom\n organization: Civic Knowledge\n type: Analyst\ntags:\n- Untagged\ncategories:\n- Uncategorized",
"_____no_output_____"
]
],
[
[
"## Crime Rhythm Maps",
"_____no_output_____"
],
[
"Description\n",
"_____no_output_____"
]
],
[
[
"import sys\n# Install required packages\n!{sys.executable} -mpip -q install matplotlib seaborn statsmodels pandas publicdata metapack\n\n%matplotlib inline\n\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport metapack as mp\nimport rowgenerators as rg\nimport publicdata as pub\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(color_codes=True)",
"_____no_output_____"
]
]
] | [
"raw",
"markdown",
"code"
] | [
[
"raw"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e750e7c30043fec40c0aaa897043cf7925ad13a2 | 9,548 | ipynb | Jupyter Notebook | Neural Network.ipynb | utkarshg6/neuralnetworks | c953b66fb3c039332a1858c2d31edf92fa5eb9e6 | [
"MIT"
] | null | null | null | Neural Network.ipynb | utkarshg6/neuralnetworks | c953b66fb3c039332a1858c2d31edf92fa5eb9e6 | [
"MIT"
] | null | null | null | Neural Network.ipynb | utkarshg6/neuralnetworks | c953b66fb3c039332a1858c2d31edf92fa5eb9e6 | [
"MIT"
] | null | null | null | 31.202614 | 94 | 0.47057 | [
[
[
"# Neural Network",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pickle\nimport gzip\n\n\ndef load_data():\n \"\"\"\n MNIST comprises of three variables ->\n Training Data: 50,000 entries\n Validation Data: 10,000 entries\n Test Data: 10,000 entries\n\n One Entry of Input = 28 * 28 = 784\n One Entry of Output = 0 - 9 {integer}\n\n training_data = (inputs, outputs) - Tuple\n inputs.shape = (50,000, 784) - Numpy Array\n outputs.shape = (50,000,) - Numpy Array\n \"\"\"\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(\n f, encoding='iso-8859-1')\n f.close()\n return (training_data, validation_data, test_data)\n\n\ndef load_data_wrapper():\n \"\"\"\n It Returns Training Data, Validation Data, Test Data\n\n Training Data is a list of 50,000 entries.\n Each element of this list is a tuple in the form (Input, Output)\n Shape of 1 Input : (784,1)\n Shape of 1 Output : (10,1)\n\n Validation Data is a list of 10,000 entries.\n Each element of this list is a tuple in the form (Input, Output)\n Shape of 1 Input : (784,1)\n Shape of 1 Output : Integer\n\n Test Data is a list of 10,000 entries.\n Each element of this list is a tuple in the form (Input, Output)\n Shape of 1 Input : (784,1)\n Shape of 1 Output : Integer\n \"\"\"\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = list(zip(training_inputs, training_results))\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = list(zip(validation_inputs, va_d[1]))\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = list(zip(test_inputs, te_d[1]))\n return (training_data, validation_data, test_data)\n\n\ndef vectorized_result(j):\n \"\"\"\n Return a 10 bit vectorized result of a number.\n \"\"\"\n e = np.zeros((10, 1))\n e[j] = 1.0\n return e",
"_____no_output_____"
],
[
"import numpy as np\nimport random",
"_____no_output_____"
],
[
"### Mathematical Functions\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\ndef sigmoid_prime(z):\n return sigmoid(z) * (1 - sigmoid(z))",
"_____no_output_____"
],
[
"class Network:\n \n def __init__(self, sizes):\n \n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]\n \n def feedforward(self, a):\n \"\"\"Returns the output value.\"\"\"\n for w, b in zip(self.weights, self.biases):\n z = np.dot(w, a) + b\n a = sigmoid(z)\n return a\n \n def SGD(self, training_data, epochs, mini_batch_size, eta, test_data = None):\n if test_data:\n n_test = len(test_data)\n n = len(training_data)\n for epoch in range(epochs):\n random.shuffle(training_data)\n mini_batches = []\n for k in range(0, n, mini_batch_size):\n mini_batches.append(training_data[k : k + mini_batch_size])\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, eta)\n if test_data:\n print('Epoch {0}: {1} / {2}'.\n format(epoch, self.evaluate(test_data), n_test))\n else:\n print('Epoch {0} complete.'.format(epoch))\n \n def update_mini_batch(self, mini_batch, eta):\n sum_delta_b = [np.zeros(b.shape) for b in self.biases]\n sum_delta_w = [np.zeros(w.shape) for w in self.weights]\n \n for x, y in mini_batch:\n delta_b, delta_w = self.backprop(x, y)\n sum_delta_b = [sdb + db \n for sdb, db in zip(sum_delta_b, delta_b)]\n sum_delta_w = [sdw + dw\n for sdw, dw in zip(sum_delta_w, delta_w)]\n \n m = len(mini_batch)\n self.biases = [b - (eta/m) * sdb\n for b, sdb in zip(self.biases, sum_delta_b)]\n self.weights = [w - (eta/m) * sdw\n for w, sdw in zip(self.weights, sum_delta_w)]\n \n \n def backprop(self, x, y):\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta_w = [np.zeros(w.shape) for w in self.weights]\n \n # forward pass\n activation = x\n activations = [x]\n zs = []\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation) + b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n \n # backward pass\n delta = (activations[-1] - y) * sigmoid_prime(zs[-1])\n delta_b[-1] = delta\n delta_w[-1] = np.dot(delta, activations[-2].transpose())\n for l in range(2, self.num_layers):\n z = zs[-l]\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sigmoid_prime(z)\n delta_b[-l] = delta\n delta_w[-l] = np.dot(delta, activations[-l-1].transpose())\n \n return (delta_b, delta_w)\n \n def evaluate(self, test_data):\n result = 0\n for x, y in test_data:\n y_hat = np.argmax(self.feedforward(x))\n if (y_hat == y):\n result += 1\n return result ",
"_____no_output_____"
],
[
"net = Network([784, 100, 60, 10])",
"_____no_output_____"
],
[
"training_data, validation_data, test_data = load_data_wrapper()",
"_____no_output_____"
],
[
"net.SGD(training_data, 10, 30, 3.0, test_data = test_data)",
"Epoch 0: 8283 / 10000\nEpoch 1: 8455 / 10000\nEpoch 2: 8515 / 10000\nEpoch 3: 8580 / 10000\nEpoch 4: 8628 / 10000\nEpoch 5: 8632 / 10000\nEpoch 6: 8673 / 10000\nEpoch 7: 8652 / 10000\nEpoch 8: 8670 / 10000\nEpoch 9: 8701 / 10000\n"
],
[
"poornet = Network([784, 10])",
"_____no_output_____"
],
[
"poornet.SGD(training_data, 10, 32, 3.0, test_data = test_data)",
"Epoch 0: 5280 / 10000\nEpoch 1: 5560 / 10000\nEpoch 2: 5614 / 10000\nEpoch 3: 5656 / 10000\nEpoch 4: 6032 / 10000\nEpoch 5: 6389 / 10000\nEpoch 6: 6429 / 10000\nEpoch 7: 6440 / 10000\nEpoch 8: 7232 / 10000\nEpoch 9: 7268 / 10000\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e750ec72e5291fc31df1fa3eaed68d4492aa9db3 | 206,978 | ipynb | Jupyter Notebook | Interactive_Distribution_Transformations.ipynb | GeostatsGuy/InteractivePython | 68de24e7b67a3e442117fd96559dfa813be35024 | [
"MIT"
] | 9 | 2021-04-13T15:14:12.000Z | 2021-07-16T18:45:03.000Z | Interactive_Distribution_Transformations.ipynb | GeostatsGuy/InteractivePython | 68de24e7b67a3e442117fd96559dfa813be35024 | [
"MIT"
] | null | null | null | Interactive_Distribution_Transformations.ipynb | GeostatsGuy/InteractivePython | 68de24e7b67a3e442117fd96559dfa813be35024 | [
"MIT"
] | 7 | 2021-04-12T16:59:19.000Z | 2022-02-26T07:57:17.000Z | 167.865369 | 45,888 | 0.855439 | [
[
[
"<p align=\"center\">\n <img src=\"https://github.com/GeostatsGuy/GeostatsPy/blob/master/TCG_color_logo.png?raw=true\" width=\"220\" height=\"240\" />\n\n</p>\n\n## Data Analytics \n\n### Interactive Distribution Transformations in Python \n\n\n#### Michael Pyrcz, Associate Professor, University of Texas at Austin \n\n##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)\n",
"_____no_output_____"
],
[
"### Data Analytics: Distribution Transformations\n\nHere's a demonstration of making and general use of distribution transformations in Python. This demonstration is part of the resources that I include for my courses in Spatial / Subsurface Data Analytics at the Cockrell School of Engineering at the University of Texas at Austin. \n\n#### Distribution Transformations\n\nWhy do we do this?\n\n* **Inference**: variable has expected shape \n\n* **Data Preparation / Cleaning**: correcting for too few data and outliers\n\n* **Theory**: a specific distribution assumption required for a method\n\nHow do we do it?\n\nWe apply this to all sample data, $x_{\\alpha}$ $\\forall$ $\\alpha = 1,\\ldots,n$.\n\n\\begin{equation}\ny_{\\alpha} = G^{-1}_Y\\left(F_X(x_{\\alpha})\\right)\n\\end{equation}\n\nwere $X$ is the original feature with a $F_X$ original cumulative distribution function and $Y$ is transformed feature with a $G_Y$ transformed cumulative distribution function.\n\n* Mapping from one distribution to another through percentiles\n\n* This may be applied to any parametric or nonparametric distributions\n\n* This is a rank preserving transform, e.g. P50 of 𝑋 is P50 of 𝑌\n\nI have a lecture on distribution transformations available on [YouTube](https://www.youtube.com/watch?v=ZDIpE3OkAIU&list=PLG19vXLQHvSB-D4XKYieEku9GQMQyAzjJ&index=14). \n\n#### Getting Started\n\nHere's the steps to get setup in Python with the GeostatsPy package:\n\n1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). \n2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. \n3. In the terminal type: pip install geostatspy. \n4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. \n\nYou will need to copy the data file to your working directory. They are available here:\n\n* Tabular data - [sample_data.csv](https://github.com/GeostatsGuy/GeoDataSets/blob/master/sample_data.csv).\n\n#### Importing Packages\n\nWe will need some standard packages. These should have been installed with Anaconda 3.",
"_____no_output_____"
]
],
[
[
"import numpy as np # ndarrys for gridded data\nimport pandas as pd # DataFrames for tabular data\nimport matplotlib.pyplot as plt # plotting\nfrom scipy import stats # summary statistics\nimport math # trigonometry etc.\nimport random # randon numbers\nfrom scipy.stats import norm # Gaussian parametric distribution\nimport geostatspy.GSLIB as GSLIB\nfrom ipywidgets import interactive # widgets and interactivity\nfrom ipywidgets import widgets \nfrom ipywidgets import Layout\nfrom ipywidgets import Label\nfrom ipywidgets import VBox, HBox",
"_____no_output_____"
]
],
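[
[
"# a compact sketch of the transform equation above, y = G_Y^{-1}(F_X(x)), using the imports above\n# (these few values are made up for the sketch; the full workflow below uses the project data)\nx = np.sort(np.array([0.21, 0.12, 0.18, 0.10, 0.15]))   # example values, sorted ascending\np = np.arange(1, len(x) + 1) / (len(x) + 1)             # cumulative probabilities, unknown-tails convention\ny = norm.ppf(p, loc = 0.0, scale = 1.0)                 # map through the standard normal inverse CDF\nprint(np.round(y, 2))",
"_____no_output_____"
]
],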
[
[
"#### Set the Random Number Seed\n\nSet the random number seed so that we have a repeatable workflow",
"_____no_output_____"
]
],
[
[
"seed = 73073",
"_____no_output_____"
]
],
[
[
"#### Loading Tabular Data\n\nHere's the command to load our comma delimited data file in to a Pandas' DataFrame object. For fun try misspelling the name. You will get an ugly, long error. ",
"_____no_output_____"
]
],
[
[
"data_url = \"https://raw.githubusercontent.com/GeostatsGuy/GeoDataSets/master/sample_data.csv\"\ndf = pd.read_csv(data_url ) # load our data table (wrong name!)",
"_____no_output_____"
]
],
[
[
"It worked, we loaded our file into our DataFrame called 'df'. But how do you really know that it worked? Visualizing the DataFrame would be useful and we already leard about these methods in this demo (https://git.io/fNgRW). \n\nWe can preview the DataFrame by printing a slice or by utilizing the 'head' DataFrame member function (with a nice and clean format, see below). With the slice we could look at any subset of the data table and with the head command, add parameter 'n=13' to see the first 13 rows of the dataset. ",
"_____no_output_____"
]
],
[
[
"df.head(n=6) # we could also use this command for a table preview",
"_____no_output_____"
]
],
[
[
"#### Calculating and Plotting a CDF by Hand\n\nLet's demonstrate the calculation and plotting of a non-parametric CDF by hand\n\n1. make a copy of the feature as a 1D array (ndarray from NumPy)\n2. sort the data in ascending order\n3. assign cumulative probabilities based on the tail assumptions\n4. plot cumuative probability vs. value",
"_____no_output_____"
]
],
[
[
"por = df['Porosity'].copy(deep = True).values # make a deepcopy of the feature from the DataFrame\nprint('The ndarray has a shape of ' + str(por.shape) + '.')\n\npor = np.sort(por) # sort the data in ascending order\nn = por.shape[0] # get the number of data samples\n\ncprob = np.zeros(n)\nfor i in range(0,n):\n index = i + 1\n cprob[i] = index / n # known upper tail\n # cprob[i] = (index - 1)/n # known lower tail\n # cprob[i] = (index - 1)/(n - 1) # known upper and lower tails\n # cprob[i] = index/(n+1) # unknown tails \n\nplt.subplot(111)\nplt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Non-parametric Porosity Cumulative Distribution Function\")\n\nplt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.2, wspace=0.1, hspace=0.2)\nplt.show()",
"The ndarray has a shape of (261,).\n"
]
],
[
[
"#### Transformation to a Parametric Distribution\n\nWe can transform our data feature distribution to any parametric distribution with this workflow.\n\n1. Calculate the cumulative probability value of each of our data values, $p_{\\alpha} = F_x(x_\\alpha)$, $\\forall$ $\\alpha = 1,\\ldots, n$.\n\n2. Apply the inverse of the target parametric cumulative distribution function (CDF) to calculate the transformed values. $y_{\\alpha} = G_y^{-1}\\left(F_x(x_\\alpha)\\right)$, $\\forall$ $\\alpha = 1,\\ldots, n$.\n\n",
"_____no_output_____"
]
],
[
[
"y = np.zeros(n)\n\nfor i in range(0,n):\n y[i] = norm.ppf(cprob[i],loc=0.0,scale=1.0)\n\nplt.subplot(121)\nplt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Non-parametric Porosity Cumulative Distribution Function\")\n\nplt.subplot(122)\nplt.plot(y,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(y,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\nplt.grid(); plt.xlim([-3.0,3.0]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"After Distribution Transformation to Gaussian\")\n\nplt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.2, wspace=0.2, hspace=0.2)\nplt.show()\n ",
"_____no_output_____"
]
],
[
[
"Let's make an interactive version of this plot to visualize the transformation.",
"_____no_output_____"
]
],
[
[
"# widgets and dashboard\nl = widgets.Text(value=' Data Analytics, Distribution Transformation, Prof. Michael Pyrcz, The University of Texas at Austin',layout=Layout(width='950px', height='30px'))\n\ndata_index = widgets.IntSlider(min=1, max = n-1, value=1.0, step = 10.0, description = 'Data Index, $\\\\alpha$',orientation='horizontal', style = {'description_width': 'initial'}, continuous_update=False)\n\nui = widgets.VBox([l,data_index],)\n\ndef run_plot(data_index): # make data, fit models and plot\n plt.subplot(131)\n plt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.scatter(por,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\n plt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\n plt.xlabel(\"Original Feature, $x$\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Original Cumulative Distribution Function\")\n plt.plot([por[data_index-1],por[data_index-1]],[0.0,cprob[data_index-1]],color = 'red',linestyle='dashed')\n plt.plot([por[data_index-1],3.0],[cprob[data_index-1],cprob[data_index-1]],color = 'red',linestyle='dashed')\n plt.annotate('x = ' + str(round(por[data_index-1],2)), xy=(por[data_index-1]+0.003, 0.01))\n plt.annotate('p = ' + str(round(cprob[data_index-1],2)), xy=(0.225, cprob[data_index-1]+0.02))\n\n \n plt.subplot(132)\n plt.plot(y,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.scatter(y,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\n plt.grid(); plt.xlim([-3.0,3.0]); plt.ylim([0.0,1.0])\n plt.xlabel(\"Gaussian Transformed Feature, $y$\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"After Distribution Transformation to Gaussian\")\n plt.plot([-3.0,y[data_index-1]],[cprob[data_index-1],cprob[data_index-1]],color = 'red',linestyle='dashed')\n plt.plot([y[data_index-1],y[data_index-1]],[0.0,cprob[data_index-1]],color = 'red',linestyle='dashed')\n #plt.arrow(y[data_index-1],cprob[data_index-1],0.0,-1.0*(cprob[data_index-1]-0.01),color = 'red',width = 0.02, head_width = 0.1, linestyle='dashed', head_length = 0.01)\n plt.annotate('p = ' + str(round(cprob[data_index-1],2)), xy=(-2.90, cprob[data_index-1]+0.02)) \n plt.annotate('y = ' + str(round(y[data_index-1],2)), xy=(y[data_index-1]+0.1, 0.01))\n \n plt.subplot(133)\n plt.plot(por,y, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.grid(); plt.xlim([0.05,0.25]); plt.ylim([-3.0,3.0])\n plt.xlabel(\"Original Porosity (fraction)\"); plt.ylabel(\"Gaussian Transformed Porosity (N[fraction])\"); plt.title(\"Parametric Distribution Transformation, Q-Q Plot\")\n #plt.plot([0.05,0.25],[0.05,0.25],color = 'red',linestyle='dashed', alpha = 0.4)\n plt.scatter(por[data_index-1],y[data_index-1],s = 50, c = 'red', edgecolor = 'black', alpha = 1.0, zorder=200) # plot the CDF points\n plt.scatter(por,y,s = 20, c = 'red', edgecolor = 'black', alpha = 0.1, zorder=100) # plot the CDF points\n \n plt.subplots_adjust(left=0.0, bottom=0.0, right=3.0, top=1.2, wspace=0.2, hspace=0.2)\n plt.show()\n \n# connect the function to make the samples and plot to the widgets \ninteractive_plot = widgets.interactive_output(run_plot, {'data_index':data_index})\ninteractive_plot.clear_output(wait = True) # reduce flickering by delaying plot updating",
"_____no_output_____"
]
],
[
[
"### Interactive Data Analytics Distribution Transformation Demonstration \n\n#### Michael Pyrcz, Associate Professor, The University of Texas at Austin \n\nSelect any data value and observe the distribution transform by mapping through cumulative probability.\n\n### The Inputs\n\n* **data_index** - the data index from 1 to n in the sorted ascending order",
"_____no_output_____"
]
],
[
[
"display(ui, interactive_plot) # display the interactive plot",
"_____no_output_____"
]
],
[
[
"#### Distribution Transform to a Non-Parametric Distribution\n\nWe can apply the mapping through cumulative probabilities to transform from any distribution to any other distribution.\n\n* let's make a new data set by randomly sampling from the previous one and adding error\n\nThen we can demonstrate transforming this dataset to match the original distribution\n\n* this is mimicking the situation where we transform a dataset to match the distribution of a better sampled analog distribution\n",
"_____no_output_____"
]
],
[
[
"n_sample = 30\ndf_sample = df.sample(n_sample,random_state = seed)\n \ndf_sample = df_sample.copy(deep = True) # make a deepcopy of the feature from the DataFrame\n\ndf_sample['Porosity'] = df_sample['Porosity'].values + np.random.normal(loc = 0.0, scale = 0.01, size = n_sample)\n\ndf_sample = df_sample.sort_values(by = 'Porosity') # sort the DataFrame\npor_sample = df_sample['Porosity'].values\nprint('The sample ndarray has a shape of ' + str(por_sample.shape) + '.')\n\ncprob_sample = np.zeros(n_sample)\nfor i in range(0,n_sample):\n index = i + 1\n cprob_sample[i] = index / n_sample # known upper tail\n # cprob[i] = (index - 1)/n # known lower tail\n # cprob[i] = (index - 1)/(n - 1) # known upper and lower tails\n # cprob[i] = index/(n+1) # unknown tails \n\nplt.subplot(121)\nplt.plot(por_sample,cprob_sample, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por_sample,cprob_sample,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Sparse Sample with Noise Cumulative Distribution Function\")\n\nplt.subplot(122)\nplt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por,cprob,s = 10, alpha = 1.0, c = 'red', edgecolor = 'black') # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Non-parametric Porosity Cumulative Distribution Function\")\n\nplt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.2, wspace=0.2, hspace=0.2)\nplt.show()",
"The sample ndarray has a shape of (30,).\n"
]
],
[
[
"Let's transform the values and show them on the target distribution.",
"_____no_output_____"
]
],
[
[
"y_sample = np.zeros(n_sample)\n\nfor i in range(0,n_sample):\n y_sample[i] = np.percentile(por,cprob_sample[i]*100, interpolation = 'linear') # piecewise linear interpolation of inverse of target CDF \n \nplt.subplot(121)\nplt.plot(por_sample,cprob_sample, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por_sample,cprob_sample,s = 30, alpha = 1.0, c = 'green', edgecolor = 'black', zorder = 100) # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Sparse Sample with Noise Cumulative Distribution Function\")\n\nplt.subplot(122)\nplt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\nplt.scatter(por,cprob,s = 10, c = 'red', edgecolor = 'black', alpha = 0.3) # plot the CDF points\nplt.scatter(y_sample,cprob_sample,s = 30, c = 'green', edgecolor = 'black', alpha = 1.0, zorder = 100) # plot the CDF points\nplt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\nplt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Non-parametric Porosity Cumulative Distribution Function\")\n\nplt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.2, wspace=0.2, hspace=0.2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Let's make an interactive version of this plot to visualize the transformation.",
"_____no_output_____"
]
],
[
[
"# widgets and dashboard\nl_sample = widgets.Text(value=' Data Analytics, Distribution Transformation, Prof. Michael Pyrcz, The University of Texas at Austin',layout=Layout(width='950px', height='30px'))\n\ndata_index_sample = widgets.IntSlider(min=1, max = n_sample, value=1.0, step = 1.0, description = 'Data Sample Index, $\\\\beta$',orientation='horizontal', style = {'description_width': 'initial'}, continuous_update=False)\n\nui_sample = widgets.VBox([l_sample,data_index_sample],)\n\ndef run_plot_sample(data_index_sample): # make data, fit models and plot\n plt.subplot(131)\n plt.plot(por_sample,cprob_sample, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.scatter(por_sample,cprob_sample,s = 30, alpha = 1.0, c = 'green', edgecolor = 'black',zorder = 100) # plot the CDF points\n plt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\n plt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Original Sparse Sample with Noise, Cumulative Distribution Function\")\n plt.plot([por_sample[data_index_sample-1],por_sample[data_index_sample-1]],[0.0,cprob_sample[data_index_sample-1]],color = 'red',linestyle='dashed')\n plt.plot([por_sample[data_index_sample-1],3.0],[cprob_sample[data_index_sample-1],cprob_sample[data_index_sample-1]],color = 'red',linestyle='dashed')\n plt.annotate('x = ' + str(round(por_sample[data_index_sample-1],2)), xy=(por_sample[data_index_sample-1]+0.003, 0.01))\n plt.annotate('p = ' + str(round(cprob_sample[data_index_sample-1],2)), xy=(0.225, cprob_sample[data_index_sample-1]+0.02))\n \n plt.subplot(132)\n plt.plot(por,cprob, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.scatter(por,cprob,s = 10, c = 'red', edgecolor = 'black', alpha = 1.0) # plot the CDF points\n plt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.0,1.0])\n plt.xlabel(\"Porosity (fraction)\"); plt.ylabel(\"Cumulative Probability\"); plt.title(\"Non-parametric Target Porosity Cumulative Distribution Function\")\n plt.plot([0.0,y_sample[data_index_sample-1]],[cprob_sample[data_index_sample-1],cprob_sample[data_index_sample-1]],color = 'red',linestyle='dashed')\n plt.plot([y_sample[data_index_sample-1],y_sample[data_index_sample-1]],[0.0,cprob_sample[data_index_sample-1]],color = 'red',linestyle='dashed')\n plt.annotate('p = ' + str(round(cprob_sample[data_index_sample-1],2)), xy=(0.053, cprob_sample[data_index_sample-1]+0.02)) \n plt.annotate('y = ' + str(round(y_sample[data_index_sample-1],2)), xy=(y_sample[data_index_sample-1]+0.003, 0.01))\n plt.scatter(y_sample[data_index_sample-1],cprob_sample[data_index_sample-1],s = 50, c = 'green', edgecolor = 'black', alpha = 1.0, zorder=100) # plot the CDF points\n \n plt.subplot(133)\n plt.plot(por_sample,y_sample, alpha = 0.2, c = 'black') # plot piecewise linear interpolation\n plt.grid(); plt.xlim([0.05,0.25]); plt.ylim([0.05,0.25])\n plt.xlabel(\"Original Porosity (fraction)\"); plt.ylabel(\"Transformed Porosity (fraction)\"); plt.title(\"Non-parametric Distribution Transformation, Q-Q Plot\")\n plt.plot([0.05,0.25],[0.05,0.25],color = 'red',linestyle='dashed', alpha = 0.4)\n plt.scatter(por_sample[data_index_sample-1],y_sample[data_index_sample-1],s = 50, c = 'green', edgecolor = 'black', alpha = 1.0, zorder=200) # plot the CDF points\n plt.scatter(por_sample,y_sample,s = 20, c = 'green', edgecolor = 'black', alpha = 0.3, zorder=100) # plot the CDF points\n \n plt.subplots_adjust(left=0.0, bottom=0.0, right=3.0, top=1.2, wspace=0.2, hspace=0.2)\n plt.show()\n \n \n \n# connect the 
function to make the samples and plot to the widgets \ninteractive_plot_s = widgets.interactive_output(run_plot_sample, {'data_index_sample':data_index_sample})\n#interactive_plot_sample.clear_output(wait = True) # reduce flickering by delaying plot updating",
"_____no_output_____"
]
],
[
[
"### Interactive Data Analytics Distribution Transformation Demonstration \n\n#### Michael Pyrcz, Associate Professor, The University of Texas at Austin \n\nSelect any data value and observe the distribution transform by mapping through cumulative probability.\n\n#### The Inputs\n\n* **data_index** - the data index from 1 to n in the sorted ascending order",
"_____no_output_____"
]
],
[
[
"display(ui_sample, interactive_plot_s) # display the interactive plot",
"_____no_output_____"
]
],
[
[
"To summarize let's look at a DataFrame with the original noisey sample and the transformed to match the original distribution.\n\n* we're making and showing a table of original values, $x_{\\beta}$ $\\forall$ $\\beta = 1, \\ldots, n_{sample}$, and the transformed values, $y_{\\beta}$ $\\forall$ $\\beta = 1, \\ldots, n_{sample}$.\n\n",
"_____no_output_____"
]
],
[
[
"df_sample['Transformed_Por'] = y_sample\ndf_sample.head(n=n_sample)",
"_____no_output_____"
]
],
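[
[
"# (Added sketch, not part of the original demo; assumes scipy is available in the environment)\n# The closing notes below mention transforming to a parametric distribution like Gaussian.\n# One minimal way: map each sample's cumulative probability through the Gaussian inverse CDF.\nfrom scipy import stats\n\np_safe = np.clip(cprob_sample, 0.001, 0.999)                   # avoid +/- infinity at p = 0 or 1\ngauss_sample = stats.norm.ppf(p_safe, loc = 0.0, scale = 1.0)  # standard normal quantile of each probability\nprint(np.round(gauss_sample[:5], 2))",
"_____no_output_____"
]
],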
[
[
"It would be straitforward to modify the code above to perform distribution transformations:\n\n* to a parametric distribution like Gaussian\n\n* to a non-parametric distribution from actual data (build a CDF and interpolate between the data samples)\n\n#### Comments\n\nThis was a basic demonstration of distribution transformations. \n\nI have other demonstrations on the basics of working with DataFrames, ndarrays, univariate statistics, plotting data, declustering, data transformations, trend modeling and many other workflows available at [Python Demos](https://github.com/GeostatsGuy/PythonNumericalDemos) and a Python package for data analytics and geostatistics at [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy). \n \nI hope this was helpful,\n\n*Michael*\n\n#### The Author:\n\n### Michael Pyrcz, Associate Professor, University of Texas at Austin \n*Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions*\n\nWith over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development. \n\nFor more about Michael check out these links:\n\n#### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)\n\n#### Want to Work Together?\n\nI hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate.\n\n* Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you! \n\n* Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems!\n\n* I can be reached at [email protected].\n\nI'm always happy to discuss,\n\n*Michael*\n\nMichael Pyrcz, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin\n\n#### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e75105206248c578a2feea158201157507664814 | 13,007 | ipynb | Jupyter Notebook | Python-Kaggle/exercise-syntax-variables-and-numbers.ipynb | rhazra-003/30-Days-of-ML-Kaggle | 745c3775a0ef5627d316067765418ccc60a2a4cc | [
"Apache-2.0"
] | 6 | 2021-08-18T06:35:32.000Z | 2021-12-17T08:47:12.000Z | Python-Kaggle/exercise-syntax-variables-and-numbers.ipynb | rhazra-003/30-Days-of-ML-Kaggle | 745c3775a0ef5627d316067765418ccc60a2a4cc | [
"Apache-2.0"
] | null | null | null | Python-Kaggle/exercise-syntax-variables-and-numbers.ipynb | rhazra-003/30-Days-of-ML-Kaggle | 745c3775a0ef5627d316067765418ccc60a2a4cc | [
"Apache-2.0"
] | null | null | null | 13,007 | 13,007 | 0.690013 | [
[
[
"**This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/hello-python).**\n\n---\n",
"_____no_output_____"
],
[
"Welcome to your first set of Python coding problems. If this is your first time using Kaggle Notebooks, welcome! \n\nNotebooks are composed of blocks (called \"cells\") of text and code. Each of these is editable, though you'll mainly be editing the code cells to answer some questions.\n\nTo get started, try running the code cell below (by pressing the ► button, or clicking on the cell and pressing ctrl+enter on your keyboard).",
"_____no_output_____"
]
],
[
[
"print(\"You've successfully run some Python code\")\nprint(\"Congratulations!\")\nprint(\"Hello_World!\")",
"_____no_output_____"
]
],
[
[
"Try adding another line of code in the cell above and re-running it. \n\nNow let's get a little fancier: Add a new code cell by clicking on an existing code cell, hitting the escape key, and then hitting the `a` or `b` key. The `a` key will add a cell above the current cell, and `b` adds a cell below.\n\nGreat! Now you know how to use Notebooks.\n\nEach hands-on exercise starts by setting up our feedback and code checking mechanism. Run the code cell below to do that. Then you'll be ready to move on to question 0.",
"_____no_output_____"
]
],
[
[
"from learntools.core import binder; binder.bind(globals())\nfrom learntools.python.ex1 import *\nprint(\"Setup complete! You're ready to start question 0.\")",
"_____no_output_____"
]
],
[
[
"# 0.\n\n*This is a silly question intended as an introduction to the format we use for hands-on exercises throughout all Kaggle courses.*\n\n**What is your favorite color? **\n\nTo complete this question, create a variable called `color` in the cell below with an appropriate value. The function call `q0.check()` (which we've already provided in the cell below) will check your answer.",
"_____no_output_____"
]
],
[
[
"# create a variable called color with an appropriate value on the line below\n# (Remember, strings in Python must be enclosed in 'single' or \"double\" quotes)\ncolor = \"Blue\"\n\n# Check your answer\nq0.check()",
"_____no_output_____"
]
],
[
[
"Didn't get the right answer? How do you not even know your own favorite color?!\n\nDelete the `#` in the line below to make one of the lines run. You can choose between getting a hint or the full answer by choosing which line to remove the `#` from. \n\nRemoving the `#` is called uncommenting, because it changes that line from a \"comment\" which Python doesn't run to code, which Python does run.",
"_____no_output_____"
]
],
[
[
"#q0.hint()\nq0.solution()",
"_____no_output_____"
]
],
[
[
"The upcoming questions work the same way. The only thing that will change are the question numbers. For the next question, you'll call `q1.check()`, `q1.hint()`, `q1.solution()`, for question 2, you'll call `q2.check()`, and so on.",
"_____no_output_____"
],
[
"<hr/>\n\n# 1.\n\nComplete the code below. In case it's helpful, here is the table of available arithmetic operations:\n\n\n\n| Operator | Name | Description |\n|--------------|----------------|--------------------------------------------------------|\n| ``a + b`` | Addition | Sum of ``a`` and ``b`` |\n| ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |\n| ``a * b`` | Multiplication | Product of ``a`` and ``b`` |\n| ``a / b`` | True division | Quotient of ``a`` and ``b`` |\n| ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |\n| ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |\n| ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |\n| ``-a`` | Negation | The negative of ``a`` |\n\n<span style=\"display:none\"></span>\n",
"_____no_output_____"
]
],
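[
[
"# (Added illustration, not part of the original exercise) A few of the operators from the\n# table above in action -- each result is easy to verify by hand:\nprint(7 / 2)    # true division   -> 3.5\nprint(7 // 2)   # floor division  -> 3\nprint(7 % 2)    # modulus         -> 1\nprint(2 ** 3)   # exponentiation  -> 8",
"_____no_output_____"
]
],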
[
[
"pi = 3.14159 # approximate\ndiameter = 3\n\n# Create a variable called 'radius' equal to half the diameter\nradius = diameter/2\n\n# Create a variable called 'area', using the formula for the area of a circle: pi times the radius squared\narea = pi*radius*radius\n\n# Check your answer\nq1.check()",
"_____no_output_____"
],
[
"# Uncomment and run the lines below if you need help.\n#q1.hint()\n#q1.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 2.\n\nAdd code to the following cell to swap variables `a` and `b` (so that `a` refers to the object previously referred to by `b` and vice versa).",
"_____no_output_____"
]
],
[
[
"########### Setup code - don't touch this part ######################\n# If you're curious, these are examples of lists. We'll talk about \n# them in depth a few lessons from now. For now, just know that they're\n# yet another type of Python object, like int or float.\na = [1, 2, 3]\nb = [3, 2, 1]\nq2.store_original_ids()\n######################################################################\n\n# Your code goes here. Swap the values to which a and b refer.\n# If you get stuck, you can always uncomment one or both of the lines in\n# the next cell for a hint, or to peek at the solution.\ntmp = a\na = b\nb = tmp\n######################################################################\n\n# Check your answer\nq2.check()",
"_____no_output_____"
],
[
"#q2.hint()",
"_____no_output_____"
],
[
"#q2.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 3a.\n\nAdd parentheses to the following expression so that it evaluates to 1.",
"_____no_output_____"
]
],
[
[
"(5 - 3 )// 2",
"_____no_output_____"
],
[
"#q3.a.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.a.solution()",
"_____no_output_____"
]
],
[
[
"# 3b. <span title=\"A bit spicy\" style=\"color: darkgreen \">🌶️</span>\n\n<small>Questions, like this one, marked a spicy pepper are a bit harder.</small>\n\nAdd parentheses to the following expression so that it evaluates to 0.",
"_____no_output_____"
]
],
[
[
"8 - (3 * 2) -( 1 + 1)",
"_____no_output_____"
],
[
"#q3.b.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.b.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 4. \nAlice, Bob and Carol have agreed to pool their Halloween candy and split it evenly among themselves.\nFor the sake of their friendship, any candies left over will be smashed. For example, if they collectively\nbring home 91 candies, they'll take 30 each and smash 1.\n\nWrite an arithmetic expression below to calculate how many candies they must smash for a given haul.",
"_____no_output_____"
]
],
[
[
"# Variables representing the number of candies collected by alice, bob, and carol\nalice_candies = 121\nbob_candies = 77\ncarol_candies = 109\n\n# Your code goes here! Replace the right-hand side of this assignment with an expression\n# involving alice_candies, bob_candies, and carol_candies\n\nto_smash = (alice_candies + bob_candies + carol_candies) % 3\n# Check your answer\nq4.check()",
"_____no_output_____"
],
[
"#q4.hint()\nq4.solution()",
"_____no_output_____"
]
],
[
[
"# Keep Going\n\nNext up, you'll **[learn to write new functions and understand functions others write](https://www.kaggle.com/colinmorris/functions-and-getting-help)**. This will make you at least 10 times more productive as a Python programmer. ",
"_____no_output_____"
],
[
"---\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e75116ee2a3a09ca76bb5f7393ed63d504eb5235 | 391,887 | ipynb | Jupyter Notebook | examples/spring_quarter_example.ipynb | SacPec/Route_Dynamics_S-dev | 97214724dd520d3e618304e7516de79e7731bed5 | [
"MIT"
] | 4 | 2019-06-14T20:54:55.000Z | 2021-02-26T03:15:20.000Z | examples/spring_quarter_example.ipynb | SacPec/Route_Dynamics_S-dev | 97214724dd520d3e618304e7516de79e7731bed5 | [
"MIT"
] | 9 | 2019-05-13T14:49:42.000Z | 2020-12-17T04:48:33.000Z | examples/spring_quarter_example.ipynb | SacPec/Route_Dynamics_S-dev | 97214724dd520d3e618304e7516de79e7731bed5 | [
"MIT"
] | 7 | 2020-02-04T20:12:42.000Z | 2021-11-03T19:27:01.000Z | 596.479452 | 116,248 | 0.948748 | [
[
[
"# Route_Dynamics Example",
"_____no_output_____"
]
],
[
[
"import os \nimport sys\nmodule_path = os.path.abspath(os.path.join('..'))\nsys.path.append(module_path)",
"_____no_output_____"
],
[
"from route_dynamics.route_energy import longi_dynam_model as ldm\nfrom route_dynamics.route_riders import route_riders as ride\nfrom route_dynamics.route_visualizer import visualizer as vis",
"/Users/ryanjcarlin/miniconda3/envs/simple_route_dynamics/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3214: DtypeWarning: Columns (46) have mixed types. Specify dtype option on import or set low_memory=False.\n if (yield from self.run_code(code, result)):\n"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"shapefile_name = '../data/six_routes.shp'\nrasterfile_name = '../data/seattle_dtm.tif'\nroute_num = 45\n# avalible routes: [48, 50, 75, 7, 45, 40]",
"_____no_output_____"
],
[
"df_45, riders_45, mass_45 = ride.route_ridership('PM', 'O', 45)\nstop_coord, rider_coord = ride.stop_coord(45, riders_45)\ncoords = rider_coord['coordinates'].values\nmass = rider_coord['Mean'].values",
"_____no_output_____"
],
[
"ex = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=coords,\n mass_array=mass + mass_45,\n a_m = 0.5\n)",
"_____no_output_____"
],
[
"ami45_plot = vis.profile_x(ex.raw_batt_power_exert, ex.route_df.cum_distance, route_num)\nami45_plot",
"_____no_output_____"
],
[
"df_45, case1_45, mass_45 = ride.route_ridership('AM', 'I', 45)\ndf_45, case2_45, mass_45 = ride.route_ridership('AM', 'I', 45)\ndf_45, case3_45, mass_45 = ride.route_ridership('AM', 'I', 45)\ncase1_45['Mean'] = 0\ncase3_45['Mean'] = 81 * 80\n",
"_____no_output_____"
],
[
"c1df, c1_comb = ride.stop_coord(45, case1_45)",
"_____no_output_____"
],
[
"c2df, c2_comb = ride.stop_coord(45, case2_45)",
"_____no_output_____"
],
[
"c3df, c3_comb = ride.stop_coord(45, case3_45) ",
"_____no_output_____"
],
[
"coords1 = c1_comb['coordinates'].values\ncoords2 = c2_comb['coordinates'].values\ncoords3 = c3_comb['coordinates'].values\n\nmass1 = c1_comb['Mean'].values\nmass2 = c2_comb['Mean'].values\nmass3 = c3_comb['Mean'].values",
"_____no_output_____"
],
[
"rd1 = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=coords1,\n mass_array=mass1 + mass_45,\n a_m = 0.5\n \n)",
"_____no_output_____"
],
[
"rd2 = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=coords2,\n mass_array=mass2 + mass_45,\n a_m = 0.5\n \n)",
"_____no_output_____"
],
[
"rd3 = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=coords3,\n mass_array=mass3 + mass_45,\n a_m = 0.5\n)",
"_____no_output_____"
],
[
"rd1.energy_from_route()",
"_____no_output_____"
],
[
"rd2.energy_from_route()",
"_____no_output_____"
],
[
"rd3.energy_from_route()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5))\n\nax.fill_between(rd1.route_df.cum_distance/1000, no_stops.route_df.elevation, color='#BDBDBD')\nax1 = ax.twinx() \n\nax1.plot(rd3.route_df.cum_distance/1000, rd3.raw_batt_power_exert/1000, linewidth=4, label='Maximum Riders', zorder=100)\nax1.plot(rd2.route_df.cum_distance/1000, rd2.raw_batt_power_exert/1000, linewidth=4, label='Normal Riders', zorder=200)\nax1.plot(rd1.route_df.cum_distance/1000, rd1.raw_batt_power_exert/1000, linewidth=4, label='No Riders', zorder=300)\n\nplt.legend(fontsize=12)\nfig.suptitle(\n 'Route {}'.format(route_num) + ': Effect of Varying Ridership',\n fontsize=20,\n y=0.95,\n )\n\nax.set_xlabel('Distance (km)', fontsize=20)\nax1.set_ylabel('Load (kW)', fontsize=20)\nax.set_ylabel('Elevation (m)', fontsize=20)\nax.tick_params(labelsize=14)\nax1.tick_params(labelsize=14)\nax1.grid(axis='y')\n\n#plt.savefig('Ridership_load.png', dpi=300)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5))\n\nax.fill_between(rd1.route_df.cum_distance/1000, y1=240, y2=-170, color='#A9F5A9')\n\nax.plot(rd3.route_df.cum_distance/1000, rd3.raw_batt_power_exert/1000, linewidth=4, label='Actual Stops', zorder=100)\nax.plot(rd2.route_df.cum_distance/1000, rd2.raw_batt_power_exert/1000, linewidth=4, label='Random Stops', zorder=200, linestyle='--')\nax.plot(rd1.route_df.cum_distance/1000, rd1.raw_batt_power_exert/1000, linewidth=4, label='No Stops', zorder=300, linestyle=(0, (1,1)))\n\nax.set_xlabel('Distance (km)', fontsize=20)\nax.set_ylabel('Load (kW)', fontsize=20)\nax.tick_params(labelsize=14)\nax.grid(axis='y')\nplt.xlim([0.5,1.5])\nplt.legend(fontsize=12)\nfig.suptitle(\n 'Route {}'.format(route_num) + ': Effect of Varying Ridership (Segment)',\n fontsize=20,\n y=0.95,\n )\n\n\n#plt.savefig('Ridership_load_segment.png', dpi=300)",
"_____no_output_____"
],
[
"no_stops = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=None,\n mass_array=mass_45\n)",
"_____no_output_____"
],
[
"real_stops = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords=coords2,\n mass_array=mass_45\n)",
"_____no_output_____"
],
[
"random_stops = ldm.RouteTrajectory(\n route_num, \n shapefile_name, \n rasterfile_name,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n stop_coords='random',\n mass_array=mass_45\n)",
"_____no_output_____"
],
[
"no_stops.energy_from_route()",
"_____no_output_____"
],
[
"real_stops.energy_from_route()",
"_____no_output_____"
],
[
"random_stops.energy_from_route()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5))\n\nax.fill_between(no_stops.route_df.cum_distance/1000, no_stops.route_df.elevation, color='#BDBDBD')\nax1 = ax.twinx() \n\nax1.plot(real_stops.route_df.cum_distance/1000, real_stops.raw_batt_power_exert/1000, linewidth=4, label='Actual Stops', zorder=100)\nax1.plot(random_stops.route_df.cum_distance/1000, random_stops.raw_batt_power_exert/1000, linewidth=4, label='Random Stops', zorder=200)\nax1.plot(no_stops.route_df.cum_distance/1000, no_stops.raw_batt_power_exert/1000, linewidth=4, label='No Stops', zorder=300)\n#ax.set_xlabel('Distance (m)')\n#ax.set_ylabel('Load (kW)')\n#ax.tick_params('y')\n#ax.grid()\n#plt.xlim([0,2000])\n#linestyle=(0, (1,1))\nplt.legend(fontsize=12)\nfig.suptitle(\n 'Route {}'.format(route_num) + ': Effect of Varying Stop Frequency',\n fontsize=20,\n y=0.95,\n )\n\nax.set_xlabel('Distance (km)', fontsize=20)\nax1.set_ylabel('Load (kW)', fontsize=20)\nax.set_ylabel('Elevation (m)', fontsize=20)\nax.tick_params(labelsize=14)\nax1.tick_params(labelsize=14)\nax1.grid(axis='y')\n\n#plt.savefig('Load_profile_stops.png', dpi=300)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5))\n\nax.fill_between(no_stops.route_df.cum_distance/1000, y1=240, y2=-170, color='#A9F5A9')\n\nax.plot(real_stops.route_df.cum_distance/1000, real_stops.raw_batt_power_exert/1000, linewidth=4, label='Actual Stops', zorder=100)\nax.plot(random_stops.route_df.cum_distance/1000, random_stops.raw_batt_power_exert/1000, linewidth=4, label='Random Stops', zorder=200, linestyle='--')\nax.plot(no_stops.route_df.cum_distance/1000, no_stops.raw_batt_power_exert/1000, linewidth=4, label='No Stops', zorder=300, linestyle=(0, (1,1)))\n\nax.set_xlabel('Distance (km)', fontsize=20)\nax.set_ylabel('Load (kW)', fontsize=20)\nax.tick_params(labelsize=14)\nax.grid(axis='y')\nplt.xlim([4.2,5.2])\nplt.legend(fontsize=12)\nfig.suptitle(\n 'Route {}'.format(route_num) + ': Effect of Varying Stop Frequency (Segment)',\n fontsize=20,\n y=0.95,\n )\n\n\n#plt.savefig('Load_profile_stops_segment.png', dpi=300)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e751177cedb7ac91b3bd037cb705ee55a0763a46 | 11,677 | ipynb | Jupyter Notebook | dev_course/dl2/07a_lsuv.ipynb | rohitgr7/fastai_docs | 531139ac17dd2e0cf08a99b6f894dbca5028e436 | [
"Apache-2.0"
] | null | null | null | dev_course/dl2/07a_lsuv.ipynb | rohitgr7/fastai_docs | 531139ac17dd2e0cf08a99b6f894dbca5028e436 | [
"Apache-2.0"
] | null | null | null | dev_course/dl2/07a_lsuv.ipynb | rohitgr7/fastai_docs | 531139ac17dd2e0cf08a99b6f894dbca5028e436 | [
"Apache-2.0"
] | null | null | null | 25.551422 | 543 | 0.542348 | [
[
[
"%load_ext autoreload\n%autoreload 2\n\n%matplotlib inline",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"#export\nfrom exp.nb_07 import *",
"_____no_output_____"
]
],
[
[
"## Layerwise Sequential Unit Variance (LSUV)",
"_____no_output_____"
],
[
"Getting the MNIST data and a CNN",
"_____no_output_____"
]
],
[
[
"x_train,y_train,x_valid,y_valid = get_data()\n\nx_train,x_valid = normalize_to(x_train,x_valid)\ntrain_ds,valid_ds = Dataset(x_train, y_train),Dataset(x_valid, y_valid)\n\nnh,bs = 50,512\nc = y_train.max().item()+1\nloss_func = F.cross_entropy\n\ndata = DataBunch(*get_dls(train_ds, valid_ds, bs), c)",
"_____no_output_____"
],
[
"mnist_view = view_tfm(1,28,28)\ncbfs = [Recorder,\n partial(AvgStatsCallback,accuracy),\n CudaCallback,\n partial(BatchTransformXCallback, mnist_view)]",
"_____no_output_____"
],
[
"nfs = [8,16,32,64,64]",
"_____no_output_____"
],
[
"class ConvLayer(nn.Module):\n def __init__(self, ni, nf, ks=3, stride=2, sub=0., **kwargs):\n super().__init__()\n self.conv = nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True)\n self.relu = GeneralRelu(sub=sub, **kwargs)\n \n def forward(self, x): return self.relu(self.conv(x))\n \n @property\n def bias(self): return -self.relu.sub\n @bias.setter\n def bias(self,v): self.relu.sub = -v\n @property\n def weight(self): return self.conv.weight",
"_____no_output_____"
],
[
"learn,run = get_learn_run(nfs, data, 0.6, ConvLayer, cbs=cbfs)",
"_____no_output_____"
]
],
[
[
"Now we're going to look at the paper [All You Need is a Good Init](https://arxiv.org/pdf/1511.06422.pdf), which introduces *Layer-wise Sequential Unit-Variance* (*LSUV*). We initialize our neural net with the usual technique, then we pass a batch through the model and check the outputs of the linear and convolutional layers. We can then rescale the weights according to the actual variance we observe on the activations, and subtract the mean we observe from the initial bias. That way we will have activations that stay normalized.\n\nWe repeat this process until we are satisfied with the mean/variance we observe.\n\nLet's start by looking at a baseline:",
"_____no_output_____"
]
],
[
[
"run.fit(2, learn)",
"train: [1.73625, tensor(0.3975, device='cuda:0')]\nvalid: [1.68747265625, tensor(0.5652, device='cuda:0')]\ntrain: [0.356792578125, tensor(0.8880, device='cuda:0')]\nvalid: [0.13243565673828125, tensor(0.9588, device='cuda:0')]\n"
]
],
[
[
"Now we recreate our model and we'll try again with LSUV. Hopefully, we'll get better results!",
"_____no_output_____"
]
],
[
[
"learn,run = get_learn_run(nfs, data, 0.6, ConvLayer, cbs=cbfs)",
"_____no_output_____"
]
],
[
[
"Helper function to get one batch of a given dataloader, with the callbacks called to preprocess it.",
"_____no_output_____"
]
],
[
[
"#export\ndef get_batch(dl, run):\n run.xb,run.yb = next(iter(dl))\n for cb in run.cbs: cb.set_runner(run)\n run('begin_batch')\n return run.xb,run.yb",
"_____no_output_____"
],
[
"xb,yb = get_batch(data.train_dl, run)",
"_____no_output_____"
]
],
[
[
"We only want the outputs of convolutional or linear layers. To find them, we need a recursive function. We can use `sum(list, [])` to concatenate the lists the function finds (`sum` applies the + operate between the elements of the list you pass it, beginning with the initial state in the second argument).",
"_____no_output_____"
]
],
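[
[
"# (Added illustration) `sum` with a list as the starting value concatenates, because `+` on lists appends:\nsum([[1], [2, 3], [4]], [])  # returns [1, 2, 3, 4]",
"_____no_output_____"
]
],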
[
[
"#export\ndef find_modules(m, cond):\n if cond(m): return [m]\n return sum([find_modules(o,cond) for o in m.children()], [])\n\ndef is_lin_layer(l):\n lin_layers = (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear, nn.ReLU)\n return isinstance(l, lin_layers)",
"_____no_output_____"
],
[
"mods = find_modules(learn.model, lambda o: isinstance(o,ConvLayer))",
"_____no_output_____"
],
[
"mods",
"_____no_output_____"
]
],
[
[
"This is a helper function to grab the mean and std of the output of a hooked layer.",
"_____no_output_____"
]
],
[
[
"def append_stat(hook, mod, inp, outp):\n d = outp.data\n hook.mean,hook.std = d.mean().item(),d.std().item()",
"_____no_output_____"
],
[
"mdl = learn.model.cuda()",
"_____no_output_____"
]
],
[
[
"So now we can look at the mean and std of the conv layers of our model.",
"_____no_output_____"
]
],
[
[
"with Hooks(mods, append_stat) as hooks:\n mdl(xb)\n for hook in hooks: print(hook.mean,hook.std)",
"0.3813672363758087 0.6907835006713867\n0.3570525348186493 0.651114284992218\n0.28284627199172974 0.5356632471084595\n0.2487572282552719 0.42617663741111755\n0.15965904295444489 0.2474386990070343\n"
]
],
[
[
"We first adjust the bias terms to make the means 0, then we adjust the standard deviations to make the stds 1 (with a threshold of 1e-3). The `mdl(xb) is not None` clause is just there to pass `xb` through `mdl` and compute all the activations so that the hooks get updated. ",
"_____no_output_____"
]
],
[
[
"#export\ndef lsuv_module(m, xb):\n h = Hook(m, append_stat)\n\n while mdl(xb) is not None and abs(h.mean) > 1e-3: m.bias -= h.mean\n while mdl(xb) is not None and abs(h.std-1) > 1e-3: m.weight.data /= h.std\n\n h.remove()\n return h.mean,h.std",
"_____no_output_____"
]
],
[
[
"We execute that initialization on all the conv layers in order:",
"_____no_output_____"
]
],
[
[
"for m in mods: print(lsuv_module(m, xb))",
"(0.17071205377578735, 1.0)\n(0.08888687938451767, 1.0000001192092896)\n(0.1499888300895691, 0.9999999403953552)\n(0.15749432146549225, 1.0)\n(0.3106708824634552, 1.0)\n"
]
],
[
[
"Note that the mean doesn't exactly stay at 0. since we change the standard deviation after by scaling the weight.",
"_____no_output_____"
],
[
"Then training is beginning on better grounds.",
"_____no_output_____"
]
],
[
[
"%time run.fit(2, learn)",
"train: [0.42438078125, tensor(0.8629, device='cuda:0')]\nvalid: [0.14604696044921875, tensor(0.9548, device='cuda:0')]\ntrain: [0.128675537109375, tensor(0.9608, device='cuda:0')]\nvalid: [0.09168212280273437, tensor(0.9733, device='cuda:0')]\nCPU times: user 4.09 s, sys: 504 ms, total: 4.6 s\nWall time: 4.61 s\n"
]
],
[
[
"LSUV is particularly useful for more complex and deeper architectures that are hard to initialize to get unit variance at the last layer.",
"_____no_output_____"
],
[
"## Export",
"_____no_output_____"
]
],
[
[
"!python notebook2script.py 07a_lsuv.ipynb",
"Converted 07a_lsuv.ipynb to exp/nb_07a.py\r\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e75134b42db11b5a62260bc42edfe8d9505f1e21 | 217,898 | ipynb | Jupyter Notebook | content/notebooks/16-MulExpl.ipynb | zoeydy/CMEESamraat | 3c8e54d4878ff3f9b9272da73c3c8700902ddb21 | [
"MIT"
] | null | null | null | content/notebooks/16-MulExpl.ipynb | zoeydy/CMEESamraat | 3c8e54d4878ff3f9b9272da73c3c8700902ddb21 | [
"MIT"
] | null | null | null | content/notebooks/16-MulExpl.ipynb | zoeydy/CMEESamraat | 3c8e54d4878ff3f9b9272da73c3c8700902ddb21 | [
"MIT"
] | null | null | null | 275.820253 | 139,146 | 0.904891 | [
[
[
"library(repr) ; options(repr.plot.res = 100, repr.plot.width = 6, repr.plot.height = 6) # Change plot sizes (in cm) - this bit of code is only relevant if you are using a jupyter notebook - ignore otherwise",
"_____no_output_____"
]
],
[
[
"# Linear Models: Multiple explanatory variables",
"_____no_output_____"
],
[
"## Introduction\n\nIn this chapter we will explore fitting a linear model to data when you have multiple explanatory (predictor) variables. \n\nThe aims of this chapter are[$^{[1]}$](#fn1):\n\n* Learning to build and fit a linear model that includes several explanatory variables\n\n* Learning to interpret the summary tables and diagnostics after fitting a linear model with multiple explanatory variables\n\n## An example\n\nThe models we looked at in the [ANOVA chapter](15-anova.ipynb) explored whether the log genome size (C value, in picograms) of terrestrial mammals varied with trophic level and whether or not the species is ground dwelling. We will now look at a single model that includes both explanatory variables.\n\nThe first thing to do is look at the data again. \n\n### Exploring the data\n\n$\\star$ Create a new blank script called `MulExpl.R` in your `Code` directory and add some introductory comments.\n\n$\\star$ Load the data saved at the end of the [ANOVA chapter](15-anova.ipynb):",
"_____no_output_____"
]
],
[
[
"load('../data/mammals.Rdata')",
"_____no_output_____"
]
],
[
[
"Look back at the end of the previous chapter to see how you saved the RData file. If `mammals.Rdata` is missing, just import the data again using `read.csv` and add the `log C Value` column to the imported data frame again (go back to the [ANOVA chapter](15-anova.ipynb) and have a look if you have forgotten how).\n\nUse `ls()`, and then `str` to check that the data has loaded correctly:",
"_____no_output_____"
]
],
[
[
"str(mammals)",
"'data.frame':\t379 obs. of 10 variables:\n $ Binomial : Factor w/ 379 levels \"Acinonyx jubatus\",..: 1 2 3 4 5 6 7 8 9 10 ...\n $ meanCvalue : num 2.56 2.64 3.75 3.7 3.98 4.69 2.15 2.43 2.73 2.92 ...\n $ Order : Factor w/ 21 levels \"Artiodactyla\",..: 2 17 17 17 1 1 4 17 17 17 ...\n $ AdultBodyMass_g: num 50500 41.2 130 96.5 94700 52300 15 25.3 50.5 33 ...\n $ DietBreadth : int 1 NA 2 NA 5 2 NA 4 NA NA ...\n $ HabitatBreadth : int 1 NA 2 2 1 1 1 2 NA 1 ...\n $ LitterSize : num 2.99 2.43 3.07 NA 1 1 0.99 4.59 3.9 3.77 ...\n $ GroundDwelling : Factor w/ 2 levels \"No\",\"Yes\": 2 NA 2 2 2 2 1 2 NA 2 ...\n $ TrophicLevel : Factor w/ 3 levels \"Carnivore\",\"Herbivore\",..: 1 NA 2 NA 2 2 NA 3 NA NA ...\n $ logCvalue : num 0.94 0.971 1.322 1.308 1.381 ...\n"
]
],
[
[
"[Previously](14-regress.ipynb), we asked if carnivores or herbivores had larger genomes. Now we want to ask questions like: do ground-dwelling carnivores have larger genomes than arboreal or flying omnivores? We need to look at plots within groups.\n\nBefore we do that, there is a lot of missing data in the data frame and we should make sure that we are using the same data for our plots and models. We will subset the data down to the complete data for the three variables:",
"_____no_output_____"
]
],
[
[
"mammals <- subset(mammals, select = c(GroundDwelling, TrophicLevel, \nlogCvalue))\nmammals <- na.omit(mammals)\nstr(mammals)",
"'data.frame':\t259 obs. of 3 variables:\n $ GroundDwelling: Factor w/ 2 levels \"No\",\"Yes\": 2 2 2 2 2 1 2 1 1 1 ...\n $ TrophicLevel : Factor w/ 3 levels \"Carnivore\",\"Herbivore\",..: 1 2 2 2 3 3 3 2 2 3 ...\n $ logCvalue : num 0.94 1.322 1.381 1.545 0.888 ...\n - attr(*, \"na.action\")= 'omit' Named int [1:120] 2 4 7 9 10 11 14 15 20 21 ...\n ..- attr(*, \"names\")= chr [1:120] \"2\" \"4\" \"7\" \"9\" ...\n"
]
],
[
[
"### Boxplots within groups\n\n[Previously](14-regress.ipynb), we used the `subset` option to fit a model just to dragonflies. You can use `subset` with plots too.\n\n$\\star$ Add `par(mfrow=c(1,2))` to your script to split the graphics into two panels.\n\n$\\star$ Copy over and modify the code from the [ANOVA chapter](15-anova.ipynb) to create a boxplot of genome size by trophic level into your script.\n\n$\\star$ Now further modify the code to generate the plots shown in the figure below (you will have to `subset` your data for this, and also use the subset option of the `plot` command).\n\n---\n\n<img src=\"./graphics/boxplots.svg\" width=\"600px\">\n<small><br> <center> Boxplots of log c value by trophic level, sub-divided by habitat preference.\n</center> </small>\n\n---\n\n```{tip}\nYou can use the `plot` function's option `main = ` to add titles to a plot.\n```\n\n### `lattice` again\n\nRecall that the `lattice` package provides some very neat extra ways to plot data in groups. They look pretty but the downside is that they don't use the same graphics system — all those `par` commands are useless for these graphs. The defaults look good though!",
"_____no_output_____"
]
],
[
[
"library(lattice)\nbwplot(logCvalue ~ TrophicLevel | GroundDwelling, data= mammals)",
"_____no_output_____"
]
],
[
[
"The code `logCvalue ~ TrophicLevel | GroundDwelling` means plot the relationship between genome size and trophic level, but group within levels of ground dwelling. We are using the function `bwplot`, which is provided by `lattice` to create box and whisker plots.\n\n$\\star$ Create the lattice plots above from within your script.\n\nRearrange this code to have three plots, showing the box and whisker plots for `GroundDwelling`, grouped within the levels of `TrophicLevel`.\n\nTry reshaping the R plot window and running the command again. Lattice tries to make good use of the available space when creating lattice plots.\n\n### Barplots again\n\nWe're going to make the barplot code from [Regress](14-regress.ipynb) even more complicated! This time we want to know the mean log genome size within combinations of `TrophicLevel` and `GroundDwelling`. We can still use `tapply`, providing more than one grouping factor. We create a set of grouping factors like this:",
"_____no_output_____"
]
],
[
[
"groups <- list(mammals$GroundDwelling, mammals$TrophicLevel)\ngroupMeans <- tapply(mammals$logCvalue, groups, FUN = mean)\nprint(groupMeans)",
" Carnivore Herbivore Omnivore\nNo 0.9589465 1.012459 1.191760\nYes 1.2138170 1.297662 1.299017\n"
]
],
[
[
"$\\star$ Copy this code into your script and run it.\n\nUse this code and the script from the [ANOVA chapter](15-anova.ipynb) to get the set of\nstandard errors for the groups `groupSE`:",
"_____no_output_____"
]
],
[
[
"seMean <- function(x){\n\t# get rid of missing values\n\tx <- na.omit(x)\n\t# calculate the standard error\n\tse <- sqrt(var(x)/length(x))\n\t# tell the function to report the standard error\n\treturn(se)\n}",
"_____no_output_____"
],
[
"groups <- list(mammals$GroundDwelling, mammals$TrophicLevel)\n\ngroupMeans <- tapply(mammals$logCvalue, groups, FUN=mean)\nprint(groupMeans)",
" Carnivore Herbivore Omnivore\nNo 0.9589465 1.012459 1.191760\nYes 1.2138170 1.297662 1.299017\n"
],
[
"groupSE <- tapply(mammals$logCvalue, groups, FUN=seMean)\nprint(groupSE)",
" Carnivore Herbivore Omnivore\nNo 0.04842209 0.03418613 0.02410400\nYes 0.05975510 0.02787009 0.03586826\n"
]
],
[
[
"Now we can use `barplot`. The default option for a barplot of\na table is to create a stacked barplot, which is not what we want. The\noption `beside=TRUE` makes the bars for each column appear\nside by side.\n\nOnce again, we save the midpoints of the bars to add the\nerror bars. The other options in the code below change the colours of\nthe bars and the length of error bar caps.",
"_____no_output_____"
]
],
[
[
"# get upper and lower standard error height\nupperSE <- groupMeans + groupSE\nlowerSE <- groupMeans - groupSE\n# create barplot\nbarMids <- barplot(groupMeans, ylim=c(0, max(upperSE)), beside=TRUE, ylab= ' log C value (pg) ' , col=c( ' white ' , ' grey70 '))\narrows(barMids, upperSE, barMids, lowerSE, ang=90, code=3, len=0.05)",
"_____no_output_____"
]
],
[
[
"$\\star$ Generate the barplot above and then edit your script to change the colours and error bar lengths to your taste.\n\n### Plotting means and confidence intervals\n\nWe'll use the `plotmeans` function again as an exercise to change graph settings and to prepare figures for reports and write ups. This is the figure you should be able to reproduce the figure below.\n\n---\n\n<img src=\"./graphics/plotmeans.svg\" width=\"600px\">\n\n<small> <center> Means and 95% confidence intervals for log genome size (picograms) in mammals for different trophic levels for a) ground dwelling species and b) other species. </center> </small>\n\n---\n\n$\\star$ Use `plotmeans` from the [ANOVA chapter](15-anova.ipynb) and the `subset` option to generate the two plots below. You will need to\nset the `ylim` option for the two plots to make them use the same $y$ axis.\n\n$\\star$ Use `text` to add labels — the command `par('usr')` will show you the limits of the plot ($x_{min}, x_{max}, y_{min}, y_{max}$) and help pick a location for the labels.\n\n$\\star$ Change the `par` settings in your code and redraw the plots to try and make better use of the space. In the example below, the box shows the edges of the R graphics window.\n\nNote the following about the the figure above (generated using plotmeans)): \n\n* **White space**: The default options in R use wide margins and spaced out axes and take up a lot of space that could be used for plotting data. You've already seen the `par` function and the options `mfrow` for multiple plots and `mar` to adjust margin size. The option `mgp` adjusts the placement of the axis label, tick labels and tick locations. See `?par` for help on the these options.\n\n* **Main titles**: Adding large titles to graphs is also a bad idea — it uses lots of space to explain something that should be in the figure legend. With multiple plots in a figure, you have to label graphs so that the figure legend can refer to them. You can add labels using `text(x,y,'label')`.\n\n* **Figure legends**: A figure caption and legend should give a clear stand-alone description of the whole figure.\n\n* **Referring to figures**: You *must* link from your text to your figures — a reader has to know which figures refer to which results. So: \"There are clear differences in mean genome size between species at different trophic levels and between ground dwelling and other species, Figure xx\".\n\n\n## Fitting the linear model\n\nAll those exploratory visualizations suggest:\n\n* Carnivores have smaller genome size; omnivores have larger genome size.\n\n* Herbivores are somewhere in between, but not consistently.\n\n* All ground dwelling mammals typically have larger genome sizes.\n\nWe suspected these things from the [ANOVA chapter analyses](15-anova.ipynb), but now we can see that they might have separate effects. We'll fit a linear model to explore this and add the two explanatory variables together.\n\n$\\star$ This is an important section — read it through carefully and ask questions if you are unsure. Copy the code into your script and add comments. *Do not just jump to the next action item*!\n\n$\\star$ First, fit the model:",
"_____no_output_____"
]
],
[
[
"model <- lm(logCvalue ~ TrophicLevel + GroundDwelling, data = mammals) ",
"_____no_output_____"
]
],
[
[
"We're going to do things right this time and check the model diagnostics\nbefore we rush into interpretation.",
"_____no_output_____"
]
],
[
[
"library(repr) ; options(repr.plot.res = 100, repr.plot.width = 7, repr.plot.height = 8) # Change plot size",
"_____no_output_____"
],
[
"par(mfrow=c(2,2))\nplot(model)",
"_____no_output_____"
],
[
"library(repr) ; options(repr.plot.res = 100, repr.plot.width = 6, repr.plot.height = 6) # Change plot size",
"_____no_output_____"
]
],
[
[
"Examine these diagnostic plots. There are six predicted values now - three trophic levels for each of the two levels of ground dwelling. Those plots look ok so now we can look at the analysis of variance table:",
"_____no_output_____"
]
],
[
[
"anova(model)",
"_____no_output_____"
]
],
[
[
"*Ignore the $p$ values*! Yes, they're highly significant but we want to understand the model, not rubber stamp it with 'significant'.\n\nThe sums of squares for the variables are both small compared to the residual sums of squares — there is lots of unexplained variation. We can calculate the $r^2$ as explained sums of squares over total sums of squares:\n\n$$\\frac{0.81 + 2.75}{0.81 + 2.75 + 13.21} = \\frac{3.56}{16.77} = 0.212$$\n\nTrophic level explain much less variation than ground dwelling — this makes intuitive sense from the plots since there are big differences between in the figure we generated above (using plotmeans) (a vs b), but small differences within.\n\nWe could also calculate a significance for the whole model by merging the terms. The total explained sums of squares of $0.81 + 2.75 = 3.56$ uses $2+1 =3$ degrees of freedom, so the mean sums of squares for all the terms together is $3.56/3=1.187$. Dividing this by the residual mean square of 0.052 gives an F of $1.187 / 0.052 = 22.83$.\n\nNow we can look at the summary table to see the coefficients:",
"_____no_output_____"
]
],
[
[
"summary(model) ",
"_____no_output_____"
]
],
[
[
"Starting at the bottom of this output, `summary` has again calculated $r^2$ for us and also an $F$ statistic for the whole model, which matches the calculation above.\n\nThe other important bits are the four coefficients. The intercept is now the reference level for two variables: it is the mean for carnivores that are not ground dwelling. We then have differences from this value for being an omnivore or herbivore and for being ground dwelling. There is a big change in genome size associated with ground dwelling and\nomnivory and both of these have large effects sizes, each introducing about a 20% difference in genome size from the non-ground dwelling carnivores. In contrast, herbivory makes a small difference — about 8%. \n\nBecause the difference is small and the standard error is large, the $t$ value suggests that this difference might arise just by chance. Put another way, it isn't significant.\n\nThe table below shows how these four coefficients combine to give the predicted values for each of the group means.\n\n| |Carnivore|Herbivore|Omnivore|\n|:-|:---|:---|:---|\n|**Not ground** | *0.98* = 0.98 | *0.98 + 0.08* = 1.06 | *0.98 + 0.17* = 1.15 |\n|**Ground**| *0.98 + 0.21* = 1.19 | *0.98 + 0.08 + 0.21* =1.27 | *0.98 + 0.17 + 0.21* = 1.36|\n\n\n(16-MulExp:Predicted-values)=\n### Predicted values\n\nGetting the model predictions by hand in this way is tedious and error prone. There is a handy function called `predict` which uses the model directly to calculate values. The default is to give you the prediction for each point in the original data, but you can also ask for specific predictions.\n\nThe first thing to do is to set up a small data frame containing the explanatory values we want to use. The variable names and the level name have to match *exactly*, so we'll use the `levels` function to get the names. We want to look at all six combinations, so we'll use the `rep` function to set this up. The `each = 2` option repeats each value twice in succession; the `times = 3` options repeats the whole set of values three times.\n\nLet's do it:",
"_____no_output_____"
]
],
[
[
"# data frame of combinations of variables\ngd <- rep(levels(mammals$GroundDwelling), times = 3)\nprint(gd)",
"[1] \"No\" \"Yes\" \"No\" \"Yes\" \"No\" \"Yes\"\n"
],
[
"tl <- rep(levels(mammals$TrophicLevel), each = 2)\nprint(tl)",
"[1] \"Carnivore\" \"Carnivore\" \"Herbivore\" \"Herbivore\" \"Omnivore\" \"Omnivore\" \n"
],
[
"predVals <- data.frame(GroundDwelling = gd, TrophicLevel = tl)",
"_____no_output_____"
]
],
[
[
"Now we have the data frame of values we want, we can use `predict`. Just as when we created log values, we can save the output back into a new column in the data frame:",
"_____no_output_____"
]
],
[
[
"predVals$predict <- predict(model, newdata = predVals)\nprint(predVals)",
" GroundDwelling TrophicLevel predict\n1 No Carnivore 0.9797572\n2 Yes Carnivore 1.1892226\n3 No Herbivore 1.0563447\n4 Yes Herbivore 1.2658102\n5 No Omnivore 1.1524491\n6 Yes Omnivore 1.3619145\n"
]
],
[
[
"Not that these are in the same order as the bars from your barplot. \n\n$\\star$ Make a copy of the barplot and arrows code from above and modify it",
"_____no_output_____"
]
],
[
[
"barMids <- barplot(groupMeans, ylim=c(0, 1.4), ylab='log C value (pg)', beside=TRUE, col=c('white', 'grey70'))\n\narrows(barMids, upperSE, barMids, lowerSE, ang=90, code=3, len=0.1)\npoints(barMids, predVals$predict, col='red', pch=12)",
"_____no_output_____"
]
],
[
[
"The red markers do not match to the calculated means. This is because the model only includes a single difference between ground and non-ground species, which has to be the same for each trophic group. That is, there is no interaction between trophic level and ground / non-ground identity of each species in the current model.\n\n$\\star$ Add the code for this plot to your script file.\n\n[Next](17-MulExplInter.ipynb), we will look at interactions, which allows these values to differ using an interaction term in the model.\n\n---\n<a id=\"fn1\"></a>\n[1]: Here you work with the script file `MulExpl.R`",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7514b67f66695103ddaaa5ba91a09a2553744d0 | 8,178 | ipynb | Jupyter Notebook | board/RFSoC2x2/base/notebooks/microblaze/microblaze_c_libraries.ipynb | Zacarhay/RFSoC2x2-PYNQ | 30fcbb2ba3b2486d9fd92d3a9424eff0ac92f7cf | [
"BSD-3-Clause"
] | 14 | 2021-03-16T18:47:33.000Z | 2022-02-14T12:02:40.000Z | board/RFSoC2x2/base/notebooks/microblaze/microblaze_c_libraries.ipynb | Zacarhay/RFSoC2x2-PYNQ | 30fcbb2ba3b2486d9fd92d3a9424eff0ac92f7cf | [
"BSD-3-Clause"
] | 3 | 2021-02-25T15:19:45.000Z | 2021-11-22T23:45:44.000Z | board/RFSoC2x2/base/notebooks/microblaze/microblaze_c_libraries.ipynb | Zacarhay/RFSoC2x2-PYNQ | 30fcbb2ba3b2486d9fd92d3a9424eff0ac92f7cf | [
"BSD-3-Clause"
] | 13 | 2021-02-25T13:42:43.000Z | 2021-12-18T12:38:34.000Z | 27.351171 | 430 | 0.551357 | [
[
[
"# PYNQ Microblaze Libraries in C\n----",
"_____no_output_____"
],
[
"## Aim/s\n* Explore the various libraries that ship with PYNQ Microblaze.\n* Try the example using the Grove ADC connector.\n* Print from the Microblaze using `pyprintf`.\n\n## References\n* [PYNQ](http://pynq.readthedocs.io)\n* [Grove](https://pynq.readthedocs.io/en/latest/pynq_libraries/grove.html)\n\n## Last revised\n\n* Feb 18, 2021, initial revision\n\n----",
"_____no_output_____"
],
[
"## `pynqmb`\n\nThe main library is `pynqmb` which consists of functions for interacting with a variety of I/O devices. `pynqmb` is split into separate `i2c.h`, `gpio.h`, `spi.h`, `timer.h` and `uart.h` header files with each one being self contained. In this notebook we will look just at the I2C and GPIO headers however the full function reference for all of the components can be found on http://pynq.readthedocs.io \n\nAll of the components follow the same pattern in having `_open` function calls that take one or more pins depending on the protocol. These function use an I/O switch in the subsystem to connect the protocol controller to the output pins. For devices not connected to output pins there are `_open_device` functions which take either the base address of the controller or it's index as defined in the board support package.\n\nFor this example we are going to use a Grove ADC connected via Pmod-Grove adapter and using the I2C protocol. One ancilliary header file that is useful when using the Pmod-Grove adapter is `pmod_grove.h` which includes the pin definitions for the adapter board. In this case we are using the G4 port on the adapter which is connected to pins 6 and 2 of the Pmod connector.\n\nIf the board has labeled PMOD0 and PMOD1, instead of PMODA and PMODB,\nwe will use PMOD0 and PMODA interchangeably, and use PMOD1 and PMODB\ninterchangeably.\n\nYou can find [more information](https://pynq.readthedocs.io/en/latest/pynq_libraries/grove.html)\non Pmod-Grove adapters.",
"_____no_output_____"
]
],
[
[
"from pynq.overlays.base import BaseOverlay\nbase = BaseOverlay('base.bit')",
"_____no_output_____"
]
],
[
[
"In the next cell, `PMOD_G4_B` and `PMOD_G4_A` are 6 and 2, respectively.",
"_____no_output_____"
]
],
[
[
"%%microblaze base.PMODA\n#include <i2c.h>\n#include <pmod_grove.h>\n\nint read_adc() {\n i2c device = i2c_open(PMOD_G4_B, PMOD_G4_A);\n unsigned char buf[2];\n buf[0] = 0;\n i2c_write(device, 0x50, buf, 1);\n i2c_read(device, 0x50, buf, 2);\n return ((buf[0] & 0x0F) << 8) | buf[1];\n}",
"_____no_output_____"
],
[
"read_adc()",
"_____no_output_____"
]
],
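[
[
"# Added illustrative follow-up: functions defined in a %%microblaze cell are\n# callable from Python (as read_adc() above shows), so the raw 12-bit reading\n# can be converted to a voltage here. The 3.3 V reference is an assumption --\n# check the datasheet of your Grove ADC module for the actual reference voltage.\nraw = read_adc()\nprint(raw, raw / 4095 * 3.3)",
"_____no_output_____"
]
],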
[
[
"We can use the `gpio` and `timer` components in concert to flash an LED connected to G1. The `timer` header provides PWM and program delay functionality, although only one can be used simultaneously.",
"_____no_output_____"
]
],
[
[
"%%microblaze base.PMODA\n#include <timer.h>\n#include <gpio.h>\n#include <pmod_grove.h>\n\nvoid flash_led() {\n gpio led = gpio_open(PMOD_G1_A);\n gpio_set_direction(led, GPIO_OUT);\n int state = 0;\n while (1) {\n gpio_write(led, state);\n state = !state;\n delay_ms(500);\n }\n}",
"_____no_output_____"
],
[
"flash_led()",
"_____no_output_____"
]
],
[
[
"----",
"_____no_output_____"
],
[
"## `pyprintf`\n\nThe `pyprint` library exposes a single `pyprintf` function which acts similarly to a regular `printf` function but forwards arguments to Python for formatting and display result in far lower code overhead than a regular printf as well as not requiring access to standard in and out.",
"_____no_output_____"
]
],
[
[
"%%microblaze base.PMODA\n#include <pyprintf.h>\n\nint test_print(float value) {\n pyprintf(\"Printing %f from the microblaze!\\n\", value);\n return 0;\n}",
"_____no_output_____"
],
[
"test_print(1.5)",
"Printing 1.500000 from the microblaze!\n"
]
],
[
[
"At present, `pyprintf` can support the common subset of datatype between Python and C - in particular <strong><tt>%{douxXfFgGeEsc}</tt></strong>. Long data types and additional format modifiers are not supported yet.",
"_____no_output_____"
],
[
"----",
"_____no_output_____"
],
[
"Copyright (C) 2021 Xilinx, Inc\n\nSPDX-License-Identifier: BSD-3-Clause",
"_____no_output_____"
],
[
"----\n\n----",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e75159333f8396bd13db0bc51d865f8d1bb3e48f | 27,418 | ipynb | Jupyter Notebook | Tensorflow/TensorflowPrac15_Making_new_layers_and_model_via_subclassing.ipynb | Vinaypatil-Ev/vinEvPy-GoCoLab | f935ff1e5e08f047410bec6dc9dc26dd79c5ad60 | [
"MIT"
] | null | null | null | Tensorflow/TensorflowPrac15_Making_new_layers_and_model_via_subclassing.ipynb | Vinaypatil-Ev/vinEvPy-GoCoLab | f935ff1e5e08f047410bec6dc9dc26dd79c5ad60 | [
"MIT"
] | null | null | null | Tensorflow/TensorflowPrac15_Making_new_layers_and_model_via_subclassing.ipynb | Vinaypatil-Ev/vinEvPy-GoCoLab | f935ff1e5e08f047410bec6dc9dc26dd79c5ad60 | [
"MIT"
] | null | null | null | 45.46932 | 1,806 | 0.540156 | [
[
[
"<a href=\"https://colab.research.google.com/github/Vinaypatil-Ev/vinEvPy-GoCoLab/blob/main/Tensorflow/TensorflowPrac15_Making_new_layers_and_model_via_subclassing.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
]
],
[
[
"#Making new layers and model via subclassing",
"_____no_output_____"
]
],
[
[
"class CustomLayer(tf.keras.layers.Layer):\r\n def __init__(self, units=32, input_shape=32, name=None):\r\n super(CustomLayer, self).__init__()\r\n winit = tf.random_normal_initializer()\r\n self.w = tf.Variable(winit(shape=(input_shape, units), dtype=\"float32\"),trainable=True)\r\n binit = tf.zeros_initializer() \r\n self.b = tf.Variable(binit(shape=(units, ), dtype=\"float32\"),trainable=True)\r\n \r\n def call(self, inputs):\r\n return tf.matmul(inputs, self.w) + self.b",
"_____no_output_____"
],
[
"CustomLayer(4, 2)(tf.ones((2, 2)))",
"_____no_output_____"
]
],
[
[
"## instead of tf.variable use built in method add_weights",
"_____no_output_____"
]
],
[
[
"class CustomLayer2(tf.keras.layers.Layer):\r\n def __init__(self, units=32, input_shape=32, name=None):\r\n super(CustomLayer, self).__init__()\r\n self.w = self.add_weight(shape=(input_shape, units), initializer=\"random_normal\", trainable=True)\r\n self.b = self.add_weight(shape=(units,), initializer=\"zeros\", trainable=True)\r\n \r\n def call(self, inputs):\r\n return tf.matmul(inputs, self.w) + self.b",
"_____no_output_____"
],
[
"CustomLayer(4, 2)(tf.ones((2, 2)))",
"_____no_output_____"
]
],
[
[
"## for unknown input shape use buid method",
"_____no_output_____"
]
],
[
[
"class CustomLayer3(tf.keras.layers.Layer):\r\n def __init__(self, units):\r\n super(CustomLayer3, self).__init__()\r\n self.units = units\r\n \r\n def build(self, input_shape):\r\n self.w = self.add_weight(\r\n shape=(input_shape[-1], self.units),\r\n initializer = \"random_normal\",\r\n trainable = True)\r\n \r\n self.b = self.add_weight(\r\n shape=(self.units, ),\r\n initializer = \"zeros\", \r\n trainable = True\r\n )\r\n \r\n def call(self, inputs):\r\n return tf.matmul(inputs, self.w) + self.b",
"_____no_output_____"
],
[
"CustomLayer3(4)(tf.ones((2, 2)))",
"_____no_output_____"
]
],
[
[
"## Layer of composite layers",
"_____no_output_____"
]
],
[
[
"class CustomCompositeLayer(tf.keras.layers.Layer):\r\n def __init__(self, units=1):\r\n super(CustomCompositeLayer, self).__init__()\r\n self.l1 = CustomLayer3(32)\r\n self.l2 = CustomLayer3(32)\r\n self.l3 = CustomLayer3(units)\r\n \r\n def call(self, inputs):\r\n x = self.l1(inputs)\r\n x = tf.nn.relu(x)\r\n x = self.l2(x)\r\n x = tf.nn.relu(x)\r\n return self.l3(x)",
"_____no_output_____"
],
[
"CustomCompositeLayer(4)(tf.ones((2, 2)))",
"_____no_output_____"
]
],
[
[
"## add_loss method in call",
"_____no_output_____"
]
],
[
[
"class ActivityRegularizer(tf.keras.layers.Layer):\r\n def __init__(self, rate):\r\n super(ActivityRegularizer, self).__init__()\r\n self.rate = rate \r\n\r\n def call(self, inputs):\r\n self.add_loss(self.rate * tf.reduce_sum(inputs))\r\n return inputs\r\n",
"_____no_output_____"
],
[
"class LayerWithKernelRegularizer(tf.keras.layers.Layer):\r\n def __init__(self, units):\r\n super(LayerWithKernelRegularizer, self).__init__()\r\n self.dense = tf.keras.layers.Dense(units, kernel_regularizer=tf.keras.regularizers.l2(1e-2))\r\n\r\n def call(self, inputs):\r\n return self.dense(inputs)",
"_____no_output_____"
],
[
"l = LayerWithKernelRegularizer(4)\r\nl(tf.ones((2, 2)))",
"_____no_output_____"
],
[
"l.losses",
"_____no_output_____"
]
],
[
[
"## Auto encoder model",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"class Sampling(tf.keras.layers.Layer):\r\n def call(self, inputs):\r\n x, y = inputs\r\n batch = tf.shape(x)[0]\r\n dim = tf.shape(x)[1]\r\n epsilon = tf.keras.backend.random_normal(shape=(batch, dim))\r\n return x + tf.exp(0.5 * y) * epsilon\r\n\r\nclass Encoder(tf.keras.layers.Layer):\r\n def __init__(self, outerdim, innerdim, name=\"Encoder\", **kwargs):\r\n super(Encoder, self).__init__(name=name, **kwargs)\r\n self.In = tf.keras.layers.Dense(outerdim, activation=\"relu\")\r\n self.elr1 = tf.keras.layers.Dense(innerdim)\r\n self.elr2 = tf.keras.layers.Dense(innerdim)\r\n self.sampling = Sampling()\r\n \r\n def call(self, inputs):\r\n x = self.In(inputs)\r\n zmean = self.elr1(x)\r\n zvar = self.elr2(x)\r\n z = Sampling((zmean, zvar))\r\n return zmean, zvar, z",
"_____no_output_____"
],
[
"class Decoder(tf.keras.layers.Layer):\r\n def __init__(self, originaldim, outerdim, name=\"Decoder\", **kwargs):\r\n super(Decoder, self).__init__(name=name, **kwargs)\r\n self.dlr1 = tf.keras.layers.Dense(outerdim, activation=\"relu\")\r\n self.Out = tf.keras.layers.Dense(originaldim, activation=\"sigmoid\") \r\n \r\n def call(self, inputs):\r\n x = self.dlr1(inputs)\r\n return self.Out(x)",
"_____no_output_____"
],
[
"class VariationAutoEncoder(tf.keras.Model):\r\n def __init__(self,originaldim, outerdim, innerdim, name=\"VAE\", **kwargs):\r\n super(VariationAutoEncoder, self).__init__(name=name, **kwargs)\r\n self.originaldim = originaldim\r\n self.encoders = Encoder(outerdim, innerdim)\r\n self.decoders = Decoder(originaldim, outerdim)\r\n \r\n def call(self, inputs):\r\n zmean, zvar, z = self.encoders(inputs)\r\n reconstructed = self.decoders(z)\r\n\r\n kl_loss = -0.5 * tf.reduce_mean(zvar, tf.square(zmean) - tf.exp(zvar) + 1)\r\n\r\n self.add_loss(kl_loss)\r\n return reconstructed",
"_____no_output_____"
],
[
"(xtrn, _), (_, _) = tf.keras.datasets.mnist.load_data()\r\nxtrn = xtrn.reshape(60000, 784).astype(\"float32\") / 255\r\ntrn_data = tf.data.Dataset.from_tensor_slices(xtrn)\r\ntrn_data = trn_data.shuffle(1024).batch(64)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"originaldim = 748\r\nouterdim = 64\r\ninnerdim = 32\r\n\r\nmse_loss = tf.keras.losses.MeanSquaredError()\r\nloss_metric = tf.keras.metrics.Mean()\r\noptimizer = tf.keras.optimizers.Adam(1e-3)\r\nvae = VariationAutoEncoder(784, 64, 32)\r\n\r\nepochs = 2\r\n\r\nfor epoch in range(epochs):\r\n print(f\"epoch: {epoch}\")\r\n for steps, x_batch_train in enumerate(trn_data):\r\n with tf.GradientTape() as tape:\r\n reconstructed = vae(x_batch_train)\r\n loss = mse(x_batch_train, reconstructed)\r\n loss += sum(vae.losses)\r\n \r\n grad = tape.gradient(loss, vae.trainable_variables)\r\n optimizer.apply_gradients(zip(grad, vae.trainable_variables))\r\n\r\n loss_metric(loss)\r\n\r\n if steps % 100 == 0:\r\n print(\"step %d: mean loss = %0.4f\" % (steps, loss_metric.result()))",
"epoch: 0\n"
],
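[
"# Added sketch (illustrative): a quick check that the trained VAE maps inputs\r\n# back to vectors of the original dimensionality.\r\nreconstructed = vae(xtrn[:5])\r\nprint(reconstructed.shape)  # expected (5, 784)",
"_____no_output_____"
],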
[
"tf.__version__",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7516950ccf8d812eb5c0854d0f3fb564a176c45 | 47,140 | ipynb | Jupyter Notebook | MRNet_fastai_example.ipynb | nswitanek/mrnet-fastai | dfc7cbeb74ada91641b46cdbf38e2c85955402eb | [
"Apache-2.0"
] | 16 | 2019-04-17T20:46:58.000Z | 2021-01-09T02:43:50.000Z | MRNet_fastai_example.ipynb | nswitanek/mrnet-fastai | dfc7cbeb74ada91641b46cdbf38e2c85955402eb | [
"Apache-2.0"
] | 10 | 2019-04-18T04:43:28.000Z | 2019-05-17T15:47:43.000Z | MRNet_fastai_example.ipynb | nswitanek/mrnet-fastai | dfc7cbeb74ada91641b46cdbf38e2c85955402eb | [
"Apache-2.0"
] | 7 | 2019-04-18T00:00:22.000Z | 2021-08-22T17:31:17.000Z | 58.925 | 24,620 | 0.719644 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom fastai.vision import *\nimport torch\n\nfrom mrnet_orig import *\n\n%matplotlib inline",
"_____no_output_____"
],
[
"! tree -d ..",
"\u001b[01;34m..\u001b[00m\r\n├── \u001b[01;34mdata\u001b[00m\r\n│ ├── \u001b[01;34maxial\u001b[00m\r\n│ │ ├── \u001b[01;34mtrain\u001b[00m\r\n│ │ └── \u001b[01;34mvalid\u001b[00m\r\n│ ├── \u001b[01;34mcoronal\u001b[00m\r\n│ │ ├── \u001b[01;34mtrain\u001b[00m\r\n│ │ └── \u001b[01;34mvalid\u001b[00m\r\n│ └── \u001b[01;34msagittal\u001b[00m\r\n│ ├── \u001b[01;34mmodels\u001b[00m\r\n│ ├── \u001b[01;34mtrain\u001b[00m\r\n│ └── \u001b[01;34mvalid\u001b[00m\r\n└── \u001b[01;34mmrnet-fastai\u001b[00m\r\n ├── \u001b[01;34mexp\u001b[00m\r\n └── \u001b[01;34m__pycache__\u001b[00m\r\n\r\n14 directories\r\n"
],
[
"! ls",
"df_abnl.pkl loss_weights.pt\t\t mrnet_orig.py\tslice_stats.json\r\nexp\t MRNet_EDA.ipynb\t\t __pycache__\ttrain_cases.pkl\r\nLICENSE MRNet_fastai_example.ipynb README.md\ttrain_pix_distr.pkl\r\n"
],
[
"! ls ../data",
"axial\t train-abnormal.csv valid-abnormal.csv\r\ncoronal train-acl.csv valid-acl.csv\r\nsagittal train-meniscus.csv valid-meniscus.csv\r\n"
],
[
"data_path = Path('../data')\nsag_path = data_path/'sagittal'\ncor_path = data_path/'coronal'\nax_path = data_path/'axial'",
"_____no_output_____"
]
],
[
[
"## Substantial class imbalance for the normal/abnormal task\n\nGiven this, we'll derive weights for a weighted binary cross entropy loss function.",
"_____no_output_____"
]
],
[
[
"train_abnl = pd.read_csv(data_path/'train-abnormal.csv', header=None,\n names=['Case', 'Abnormal'], \n dtype={'Case': str, 'Abnormal': np.int64})\nprint(train_abnl.shape)\ntrain_abnl.head()",
"(1130, 2)\n"
],
[
"w = train_abnl.Abnormal.sum() / train_abnl.shape[0]\nprint(w)\nweights = Tensor([w, 1-w])\nprint(weights)\ntorch.save(weights, 'loss_weights.pt')",
"0.8079646017699115\ntensor([0.8080, 0.1920])\n"
],
[
"weights = torch.load('loss_weights.pt')",
"_____no_output_____"
]
],
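[
[
"# Added illustration only: the WtBCELoss actually used further below comes from\n# the local mrnet_orig module and may be implemented differently. One common way\n# to apply class weights like the [w, 1-w] tensor saved above is to rescale each\n# sample's binary cross-entropy term by the weight of its class:\nimport torch.nn.functional as F\n\ndef weighted_bce(logits, target, weights):\n    # positive samples weighted by weights[1], negative samples by weights[0]\n    w = target * weights[1] + (1 - target) * weights[0]\n    return F.binary_cross_entropy_with_logits(logits, target, weight=w)",
"_____no_output_____"
]
],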
[
[
"## Load previously created files\n\n- `df_abnl` -> master `df` for use with Data Block API, also contains # of slices per series\n- `slice_stats` -> `dict` stored as `json` with mean and max # of slices per series",
"_____no_output_____"
]
],
[
[
"df_abnl = pd.read_pickle('df_abnl.pkl')\ndf_abnl.head()",
"_____no_output_____"
],
[
"with open('slice_stats.json', 'r') as file:\n stats = json.load(file)\n \nstats",
"_____no_output_____"
],
[
"max_slc = stats['sagittal']['max']\nprint(max_slc)",
"51\n"
]
],
[
[
"## MRNet implementation\n\nModified from the original [paper](https://journals.plos.org/plosmedicine/article?id=10.1371/journal.pmed.1002699) to (sort of) work with `fastai`",
"_____no_output_____"
]
],
[
[
"il = MR3DImageList.from_df(df_abnl, sag_path, suffix='.npy')",
"_____no_output_____"
],
[
"il.items[0]",
"_____no_output_____"
],
[
"il",
"_____no_output_____"
],
[
"sd = il.split_from_df(col=2)\nsd",
"_____no_output_____"
],
[
"ll = sd.label_from_df(cols=1)\nll",
"_____no_output_____"
],
[
"# tfms = get_transforms()",
"_____no_output_____"
],
[
"bs = 1\ndata = ll.databunch(bs=bs)",
"_____no_output_____"
],
[
"learn = mrnet_learner(data, MRNet(), opt_func=optim.Adam, loss_func=WtBCELoss(weights),\n callbacks=MRNetCallback(), metrics=accuracy)",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
],
[
"learn.lr_find()",
"_____no_output_____"
],
[
"learn.recorder.plot()",
"_____no_output_____"
],
[
"learn.fit_one_cycle(1, 3e-4)",
"_____no_output_____"
]
],
[
[
"Accuracy is terrible, but what do you expect out of a single linear layer...?",
"_____no_output_____"
]
],
[
[
"learn.unfreeze()",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7516db9e3dae1bf9e3b26cc0494867e88712daa | 176,305 | ipynb | Jupyter Notebook | styling/effective_matplotlib.ipynb | TillMeineke/machine_learning | 689ca5e9e4450266786fab73299e1f8bdad7a473 | [
"MIT"
] | null | null | null | styling/effective_matplotlib.ipynb | TillMeineke/machine_learning | 689ca5e9e4450266786fab73299e1f8bdad7a473 | [
"MIT"
] | null | null | null | styling/effective_matplotlib.ipynb | TillMeineke/machine_learning | 689ca5e9e4450266786fab73299e1f8bdad7a473 | [
"MIT"
] | null | null | null | 309.850615 | 46,042 | 0.915697 | [
[
[
"# https://pbpython.com/effective-matplotlib.html",
"_____no_output_____"
]
],
[
[
"# setup imports and read in some data:\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\n\n#%matplotlib notebook\n\ndf = pd.read_excel(\"https://github.com/chris1610/pbpython/blob/master/data/sample-salesv3.xlsx?raw=true\")\ndf.head()",
"_____no_output_____"
],
[
"# summarize the data so we can see the total number of purchases \n# and total sales for the top 10 customers.\n# rename columns for clarity during plots.\n\ntop_10 = (df.groupby('name')['ext price', 'quantity'].agg({'ext price': 'sum', 'quantity': 'count'})\n .sort_values(by='ext price', ascending=False))[:10].reset_index()\ntop_10.rename(columns={'name': 'Name', 'ext price': 'Sales', 'quantity': 'Purchases'}, inplace=True)",
"/var/folders/r8/zdlnr35s6qz6zx67nmc9bdnm0000gn/T/ipykernel_7962/3271363455.py:5: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.\n top_10 = (df.groupby('name')['ext price', 'quantity'].agg({'ext price': 'sum', 'quantity': 'count'})\n"
],
[
"top_10",
"_____no_output_____"
],
[
"# show different styles available on system\nplt.style.available",
"_____no_output_____"
],
[
"# using a style\nplt.style.use('fivethirtyeight')",
"_____no_output_____"
],
[
"# plot the data using the standard pandas plotting function\ntop_10.plot(kind='barh', y=\"Sales\", x=\"Name\")",
"_____no_output_____"
],
[
"def currency(x, pos):\n 'The two args are the value and tick position'\n if x >= 1000000:\n return '${:1.1f}M'.format(x*1e-6)\n return '${:1.0f}K'.format(x*1e-3)",
"_____no_output_____"
],
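[
"# quick sanity check of the formatter helper above (the second argument is the\n# tick position, which the function ignores)\nprint(currency(125000, None), currency(2500000, None))",
"_____no_output_____"
],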
[
"fig, ax = plt.subplots(figsize=(6, 7))\ntop_10.plot(kind='barh', y=\"Sales\", x=\"Name\", ax=ax)\nax.set_xlim([-10000, 140000])\n#ax.set_xlabel('Total Revenue')\n#ax.set_ylabel('Customer');\nax.set(title='2014 Revenue', xlabel='Total Revenue', ylabel='Customer')\nformatter = FuncFormatter(currency)\nax.xaxis.set_major_formatter(formatter)\nax.legend().set_visible(False)\n",
"_____no_output_____"
],
[
"# Create the figure and the axes\nfig, ax = plt.subplots()\n\n# Plot the data and get the averaged\ntop_10.plot(kind='barh', y=\"Sales\", x=\"Name\", ax=ax)\navg = top_10['Sales'].mean()\n\n# Set limits and labels\nax.set_xlim([-10000, 140000])\nax.set(title='2014 Revenue', xlabel='Total Revenue', ylabel='Customer')\n\n# Add a line for the average\nax.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)\n\n# Annotate the new customers\nfor cust in [3, 5, 8]:\n ax.text(115000, cust, \"New Customer\")\n\n# Format the currency\nformatter = FuncFormatter(currency)\nax.xaxis.set_major_formatter(formatter)\n\n# Hide the legend\nax.legend().set_visible(False)",
"_____no_output_____"
],
[
"# Get the figure and the axes\nfig, (ax0, ax1) = plt.subplots(nrows=1,ncols=2, sharey=True, figsize=(7, 4))\ntop_10.plot(kind='barh', y=\"Sales\", x=\"Name\", ax=ax0)\nax0.set_xlim([-10000, 140000])\nax0.set(title='Revenue', xlabel='Total Revenue', ylabel='Customers')\n\n# Plot the average as a vertical line\navg = top_10['Sales'].mean()\nax0.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)\n\n# Repeat for the unit plot\ntop_10.plot(kind='barh', y=\"Purchases\", x=\"Name\", ax=ax1)\navg = top_10['Purchases'].mean()\nax1.set(title='Units', xlabel='Total Units', ylabel='')\nax1.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)\n\n# Title the figure\nfig.suptitle('2014 Sales Analysis', fontsize=14, fontweight='bold');\n\n# Hide the legends\nax1.legend().set_visible(False)\nax0.legend().set_visible(False)",
"_____no_output_____"
],
[
"fig.canvas.get_supported_filetypes()",
"_____no_output_____"
],
[
"fig.savefig('data/sales2.png', transparent=False, dpi=80, bbox_inches=\"tight\")\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7517635aed66177baa7a3cec6fd2816267ffeda | 251,205 | ipynb | Jupyter Notebook | analysis/tuned_1/summary.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | analysis/tuned_1/summary.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | analysis/tuned_1/summary.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | 244.600779 | 214,788 | 0.880671 | [
[
[
"import pandas as pd\nimport json\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport matplotlib\n\n\n\nfrom steves_utils.summary_utils import (\n get_experiments_from_path\n)\n\nfrom steves_utils.utils_v2 import (\n get_experiments_base_path\n)",
"_____no_output_____"
],
[
"experiments_to_get = [\n \"oracle.run2\",\n \"oracle.run1\",\n \"cores\",\n \"metehan\",\n \"wisig\",\n \"oracle.run1.framed\",\n \"oracle.run2.framed\",\n]\n\nexperiments = {}\n\nfor experiment in experiments_to_get:\n print(experiment)\n experiments[experiment] = get_experiments_from_path(\n os.path.join(get_experiments_base_path(), \"tuned_1\", experiment)\n )",
"oracle.run2\noracle.run1\ncores\nmetehan\nwisig\noracle.run1.framed\noracle.run2.framed\n"
],
[
"all_trials = pd.DataFrame(columns=[\n \"experiment_name\",\n \"source_val_label_accuracy\",\n \"target_val_label_accuracy\",\n \"x_transforms_source\"\n])\n\nfor experiment in experiments_to_get: \n for trial in experiments[experiment]:\n f = pd.DataFrame(trial[\"results\"])\n f[\"experiment_name\"] = experiment\n f[\"x_transforms_source\"] = str(trial[\"parameters\"][\"x_transforms_source\"])\n f = f[all_trials.columns]\n \n f = f.iloc[0] # Unknown why, but pandas is repeating trials for each domain in the trial!\n \n \n all_trials = all_trials.append(f)\n \nall_trials = all_trials.reset_index(drop=True)\nall_trials",
"_____no_output_____"
],
[
"m = pd.melt(all_trials, \n id_vars=[\"experiment_name\", \"x_transforms_source\"], \n value_vars=[\n \"source_val_label_accuracy\",\n \"target_val_label_accuracy\",\n ])\nm",
"_____no_output_____"
],
[
"matplotlib.rcParams.update({'font.size': 22})\nplt.style.use('seaborn-whitegrid')\n\nfg = sb.catplot(x='x_transforms_source', col=\"experiment_name\", y='value', hue='variable', \n data=m, kind='bar', height=5, aspect=3, col_wrap=2, edgecolor = \"black\")\nfg.set_xlabels('')\nplt.figure(figsize=(15,50))\n\n# iterate through axes\nfor ax in fg.axes.ravel():\n \n ax.tick_params(labelbottom=True)\n \n # add annotations\n for c in ax.containers:\n labels = [f'{(v.get_height()):.2f}' for v in c]\n ax.bar_label(c, labels=labels, label_type='edge')\n ax.margins(y=0.2)\n\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e751769da16b08ab8c35760a1909ec5720e9d3d9 | 89,146 | ipynb | Jupyter Notebook | 2021_2022/live/01_linear_regression.ipynb | MATF-RI/Materijali-sa-vezbi | f4d64177c3cd903cd2123b90876b98e52cb0db36 | [
"MIT"
] | null | null | null | 2021_2022/live/01_linear_regression.ipynb | MATF-RI/Materijali-sa-vezbi | f4d64177c3cd903cd2123b90876b98e52cb0db36 | [
"MIT"
] | null | null | null | 2021_2022/live/01_linear_regression.ipynb | MATF-RI/Materijali-sa-vezbi | f4d64177c3cd903cd2123b90876b98e52cb0db36 | [
"MIT"
] | null | null | null | 54.060643 | 18,764 | 0.762165 | [
[
[
"# Linearna algebra",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"A = np.array([[1,2,3],\n [4,5,6]])",
"_____no_output_____"
],
[
"A",
"_____no_output_____"
],
[
"A.shape",
"_____no_output_____"
],
[
"E = np.eye(5)\nE",
"_____no_output_____"
],
[
"np.zeros((2,3))",
"_____no_output_____"
],
[
"ones = np.ones((2,3))\nones",
"_____no_output_____"
],
[
"-1 * ones",
"_____no_output_____"
],
[
"5 + ones",
"_____no_output_____"
],
[
"v = np.array([1,2,3])\nones + v",
"_____no_output_____"
],
[
"np.sin(A)",
"_____no_output_____"
],
[
"B = np.array([[1,2,1],[2,2,3],[4,5,5]])\nB.shape",
"_____no_output_____"
],
[
"A.shape",
"_____no_output_____"
],
[
"C = np.ones((3,3))",
"_____no_output_____"
],
[
"B + C",
"_____no_output_____"
],
[
"B * C == B",
"_____no_output_____"
],
[
"A.dot(B)",
"_____no_output_____"
],
[
"B",
"_____no_output_____"
],
[
"v = B[:,0]\nv",
"_____no_output_____"
],
[
"v = np.array([1,2,3,4,5,6,7])",
"_____no_output_____"
],
[
"v[::2]",
"_____no_output_____"
],
[
"v[::-1]",
"_____no_output_____"
],
[
"from numpy import linalg as LA",
"_____no_output_____"
],
[
"LA.det(B)",
"_____no_output_____"
],
[
"LA.det(np.ones((2,2)))",
"_____no_output_____"
],
[
"LA.inv(B)",
"_____no_output_____"
],
[
"LA.norm(v)",
"_____no_output_____"
],
[
"LA.norm(B, ord='fro')",
"_____no_output_____"
],
[
"LA.cond(B)",
"_____no_output_____"
],
[
"LA.norm(B) * LA.norm(LA.inv(B))",
"_____no_output_____"
],
[
"LA.cond(np.ones((2,2)))",
"_____no_output_____"
],
[
"LA.cond(np.array([[1,1],\n [1,0.999]]))",
"_____no_output_____"
],
[
"LA.eig(B)",
"_____no_output_____"
]
],
[
[
"# Linearna regresija",
"_____no_output_____"
]
],
[
[
"# Ax = b\nA = np.array([[2,0],\n [-1,1],\n [0,2]])\nb = np.array([2,0,-2])",
"_____no_output_____"
],
[
"x = LA.inv(A.T.dot(A)).dot(A.T).dot(b)\nx",
"_____no_output_____"
],
[
"x, rss, _, _ = LA.lstsq(A, b, rcond=None)",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
[
"rss",
"_____no_output_____"
]
],
[
[
"Zadatak 1\n\nOdrediti koeficijente w0 i w1 tako da funkcija 𝑓(𝑥)=w0+w1𝑥 u smislu metode najmanjih kvadrata najbolje aproksimira skup tačaka (0,1.2), (0.5,2.05), (1,2.9) i (−0.5,0.1) u ravni.\n",
"_____no_output_____"
]
],
[
[
"# Aw = w0 + w1x\nx = np.array([0, 0.5, 1, -0.5])\ny = np.array([1.2, 2.05, 2.9, 0.1])\nones = np.ones(4)\nA = np.vstack((ones, x)).T\n\nLA.lstsq(A, y, rcond=None)",
"_____no_output_____"
]
],
[
[
"Zadatak 2\n\nOdrediti vrednosti koeficijenata 𝑎 i 𝑏 tako da funkcija 𝑓(𝑥)=𝑎+𝑏sin𝑥 u smislu metode najmanjih kvadrata aproksimira skup tacaka (2,2.6), (−1.22,−1.7), (8.32,2.5) i (4.23,−1.6) u ravni. Dati ocenu greske. Prikazati skup tačaka i nacrtati rezultujucu funkciju.",
"_____no_output_____"
]
],
[
[
"x = np.array([2,-1.22,8.32,4.23])\ny = np.array([2.6,-1.7,2.5,-1.6])\n\nA = np.vstack((ones, np.sin(x))).T\n\nsolution, rss, _, _ = LA.lstsq(A, y, rcond=None)\na, b = solution\nprint(a, b)",
"0.4629244420449795 2.315513480740615\n"
],
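[
"# error estimate requested in the task: residual sum of squares reported by lstsq\nprint(rss)",
"_____no_output_____"
],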
[
"from matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"xs = np.linspace(-5, 10, 100)\nplt.plot(xs, a + b * np.sin(xs))\nplt.plot(x, y, 'o')",
"_____no_output_____"
]
],
[
[
"Zadatak 3\n\nU datoteci social_reach.csv se nalaze cene reklamiranja za različite demografske grupe, koje su date u hiljadama evra za 1000 pregleda. Svaka od tri kolone označava različitu platformu za reklamiranje (na primer, platforme mogu biti Facebook, Instagram ili YouTube). Svaki red označava različitu demografsku grupu, koja može npr. biti posebna država u kojoj se reklama plasira. Potrebno je odrediti iznos sredstava da se ostvari približno milion pregleda za svaku demografsku grupu, gledajući po svim platformama ukupno.",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv('social_reach.csv')\ndf",
"_____no_output_____"
],
[
"y = 1000 * np.ones(10)\ny",
"_____no_output_____"
],
[
"A = df[['web1', 'web2', 'web3']]\nA",
"_____no_output_____"
],
[
"LA.lstsq(A, y, rcond=None)",
"_____no_output_____"
]
],
[
[
"Zadatak 4\n\nSvaki red u fajlu advertising.csv sadrži informacije o cenama u hiljadama dolara reklamnih usluga na određenom tržištu. Prva kolona se odnosi na cene reklamiranja na televiziji, druga na radiju, a treća u novinama. Četvrta kolona se odnosi na ukupnu prodaju proizvoda koji su se reklamirali na datim medijima. Kreirati linearni model koji predviđa prodaju na osnovu cena reklamiranja.",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"import pandas as pd\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_csv('advertising.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"tv = df['TV']\nsales = df['Sales']\nplt.scatter(tv, sales)",
"_____no_output_____"
],
[
"radio = df['Radio']\nplt.scatter(radio, sales)",
"_____no_output_____"
],
[
"newspaper = df['Newspaper']\nplt.scatter(newspaper, sales)",
"_____no_output_____"
],
[
"X = df[['TV', 'Radio', 'Newspaper']]\ny = df['Sales']",
"_____no_output_____"
],
[
"model = LinearRegression()",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"model.coef_",
"_____no_output_____"
],
[
"model.intercept_",
"_____no_output_____"
],
[
"2.74 + 0.05*TV + 0.19 * Radio - 0.001 * Novine",
"_____no_output_____"
],
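[
"# added example with made-up budget figures: predicted sales for a market that\n# spends 100 on TV, 20 on radio and 10 on newspaper advertising (dataset units)\nnew_market = pd.DataFrame({'TV': [100], 'Radio': [20], 'Newspaper': [10]})\nmodel.predict(new_market)",
"_____no_output_____"
],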
[
"from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score",
"_____no_output_____"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
],
[
"mean_squared_error(y_test, y_pred)",
"_____no_output_____"
],
[
"r2_score(y_test, y_pred)",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score",
"_____no_output_____"
],
[
"scores = cross_val_score(model, X_train, y_train, cv=5)\nscores.mean()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7518294ebe3132097b71b98295181fb1760ae2f | 81,018 | ipynb | Jupyter Notebook | dog_app.ipynb | blackcisne10/Dog-Breed-Classifier | a2c38ae7a15952beec0e17f2320a3741a22da293 | [
"MIT"
] | 1 | 2020-02-05T16:31:38.000Z | 2020-02-05T16:31:38.000Z | dog_app.ipynb | blackcisne10/Dog-Breed-Classifier | a2c38ae7a15952beec0e17f2320a3741a22da293 | [
"MIT"
] | null | null | null | dog_app.ipynb | blackcisne10/Dog-Breed-Classifier | a2c38ae7a15952beec0e17f2320a3741a22da293 | [
"MIT"
] | null | null | null | 50.731371 | 1,737 | 0.599361 | [
[
[
"# Convolutional Neural Networks\n\n## Project: Write an Algorithm for a Dog Identification App \n\n---\n\nIn this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n\nThe rubric contains _optional_ \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. If you decide to pursue the \"Stand Out Suggestions\", you should include the code in this Jupyter notebook.\n\n\n\n---\n### Why We're Here \n\nIn this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). \n\n\n\nIn this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!\n\n### The Road Ahead\n\nWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.\n\n* [Step 0](#step0): Import Datasets\n* [Step 1](#step1): Detect Humans\n* [Step 2](#step2): Detect Dogs\n* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)\n* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)\n* [Step 5](#step5): Write your Algorithm\n* [Step 6](#step6): Test Your Algorithm\n\n---\n<a id='step0'></a>\n## Step 0: Import Datasets\n\nMake sure that you've downloaded the required human and dog datasets:\n* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. \n\n* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. \n\n*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*\n\nIn the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom glob import glob\n\n# load filenames for human and dog images\nhuman_files = np.array(glob(\"lfw/*/*\"))\ndog_files = np.array(glob(\"dogImages/*/*/*\"))\n\n# print number of images in each dataset\nprint('There are %d total human images.' % len(human_files))\nprint('There are %d total dog images.' % len(dog_files))",
"There are 13233 total human images.\nThere are 8351 total dog images.\n"
]
],
[
[
"<a id='step1'></a>\n## Step 1: Detect Humans\n\nIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. \n\nOpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.",
"_____no_output_____"
]
],
[
[
"import cv2 \nimport matplotlib.pyplot as plt \n%matplotlib inline \n\n# extract pre-trained face detector\nface_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# load color (BGR) image\nimg = cv2.imread(human_files[4])\n# convert BGR image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# find faces in image\nfaces = face_cascade.detectMultiScale(gray)\n\n# print number of faces detected in the image\nprint('Number of faces detected:', len(faces))\n\n# get bounding box for each detected face\nfor (x,y,w,h) in faces:\n # add bounding box to color image\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n \n# convert BGR image to RGB for plotting\ncv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n# display the image, along with bounding box\nplt.imshow(cv_rgb)\nplt.show()",
"_____no_output_____"
],
[
"import cv2 \nimport matplotlib.pyplot as plt \n%matplotlib inline \n\n# extract pre-trained face detector\nface_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# load color (BGR) image\nimg = cv2.imread(dog_files[10])\n# convert BGR image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# find faces in image\nfaces = face_cascade.detectMultiScale(gray)\n\n# print number of faces detected in the image\nprint('Number of faces detected:', len(faces))\n\n# get bounding box for each detected face\nfor (x,y,w,h) in faces:\n # add bounding box to color image\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n \n# convert BGR image to RGB for plotting\ncv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n# display the image, along with bounding box\nplt.imshow(cv_rgb)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. \n\nIn the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.\n\n### Write a Human Face Detector\n\nWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.",
"_____no_output_____"
]
],
[
[
"# returns \"True\" if face is detected in image stored at img_path\ndef face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Assess the Human Face Detector\n\n__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. \n- What percentage of the first 100 images in `human_files` have a detected human face? \n- What percentage of the first 100 images in `dog_files` have a detected human face? \n\nIdeally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.",
"_____no_output_____"
]
],
[
[
"human_files_short = human_files[:100]\ndog_files_short = dog_files[:100]\n\nprint(len(human_files_short))\nprint(len(dog_files_short))",
"_____no_output_____"
]
],
[
[
"__Answer:__ \n(You can print out your results and/or write your percentages in this cell)",
"_____no_output_____"
]
],
[
[
"from tqdm import tqdm\n\nhuman_files_short = human_files[:100]\ndog_files_short = dog_files[:100]\n\n#-#-# Do NOT modify the code above this line. #-#-#\n\n## TODO: Test the performance of the face_detector algorithm \n## on the images in human_files_short and dog_files_short.\nhuman_detected = 0\ndog_detected = 0\n\nnum_files = len(human_files_short)\n\nfor i in range(0, num_files):\n human_path = human_files_short[i]\n dog_path = dog_files_short[i]\n \n if face_detector(human_path) == True:\n human_detected += 1\n if face_detector(dog_path) == True:\n dog_detected +=1\n \nprint('Haar Face Detection')\nprint('The percentage of the detected face - Human: {0:.0%}'.format(human_detected/num_files))\nprint('The percentage of the detected face - Dogs: {0:.0%}'.format(dog_detected/num_files))",
"_____no_output_____"
]
],
[
[
"We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.",
"_____no_output_____"
]
],
[
[
"### (Optional) \n### TODO: Test performance of another face detection algorithm.\n### Feel free to use as many code cells as needed.",
"_____no_output_____"
]
],
[
[
"---\n<a id='step2'></a>\n## Step 2: Detect Dogs\n\nIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. \n\n### Obtain Pre-trained VGG-16 Model\n\nThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ",
"_____no_output_____"
]
],
[
[
"import torch\nimport torchvision.models as models\n\n# define VGG16 model\nVGG16 = models.vgg16(pretrained=True)\n\n# check if CUDA is available\nuse_cuda = torch.cuda.is_available()\n\n# move model to GPU if CUDA is available\nif use_cuda:\n VGG16 = VGG16.cuda()",
"_____no_output_____"
],
[
"use_cuda",
"_____no_output_____"
],
[
"VGG16",
"_____no_output_____"
]
],
[
[
"Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.",
"_____no_output_____"
],
[
"### (IMPLEMENTATION) Making Predictions with a Pre-trained Model\n\nIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.\n\nBefore writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).",
"_____no_output_____"
]
],
[
[
"from PIL import Image\nimport torchvision.transforms as transforms\n\n# Set PIL to be tolerant of image files that are truncated.\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ndef VGG16_predict(img_path):\n '''\n Use pre-trained VGG-16 model to obtain index corresponding to \n predicted ImageNet class for image at specified path\n \n Args:\n img_path: path to an image\n \n Returns:\n Index corresponding to VGG-16 model's prediction\n '''\n \n ## COMPLETED: Complete the function.\n ## Load and pre-process an image from the given img_path\n img = Image.open(img_path)\n \n # VGG-16 takes 224x224 images as input, resize\n # Convert PIL image to Tensor\n # Normalize input images to make its elements from 0 to 1\n data_transform = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n \n ])\n \n # Apply the transformation above, preprocess image to 4D Tensor(.unsqueeze(0) adds a dimension)\n img_tensor = data_transform(img).unsqueeze_(0)\n \n # Move tensor to GPU if available\n if use_cuda:\n img_tensor = img_tensor.cuda()\n \n # Turn on evaluation mode\n VGG16.eval()\n \n # Get predicted category for image\n with torch.no_grad():\n output = VGG16(img_tensor)\n prediction = torch.argmax(output).item()\n \n ## Return the *index* of the predicted class for that image\n \n VGG16.train()\n return prediction # predicted class index",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Write a Dog Detector\n\nWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).\n\nUse these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).",
"_____no_output_____"
]
],
[
[
"### returns \"True\" if a dog is detected in the image stored at img_path\ndef dog_detector(img_path):\n ## COMPLETED: Complete the function.\n \n prediction = VGG16_predict(img_path)\n \n return True if 151 <= prediction <= 268 else False # true/false",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Assess the Dog Detector\n\n__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. \n- What percentage of the images in `human_files_short` have a detected dog? \n- What percentage of the images in `dog_files_short` have a detected dog?",
"_____no_output_____"
],
[
"__Answer:__ \n",
"_____no_output_____"
]
],
[
[
"### COMPLETED: Test the performance of the dog_detector function\n### on the images in human_files_short and dog_files_short.\n# human_files_short\nhuman_detected = 0\ndog_detected = 0\n\nnum_files = len(human_files_short)\n\nfor i in range(0, num_files):\n human_path = human_files_short[i]\n dog_path = dog_files_short[i]\n \n if dog_detector(human_path) == True:\n human_detected += 1\n if dog_detector(dog_path) == True:\n dog_detected +=1\n \nprint('VGG16')\nprint('The percentage of the detected face - Human: {0:.0%}'.format(human_detected/num_files))\nprint('The percentage of the detected face - Dogs: {0:.0%}'.format(dog_detected/num_files))",
"_____no_output_____"
]
],
[
[
"We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.",
"_____no_output_____"
]
],
[
[
"### (Optional) \n### COMPLETED: Report the performance of another pre-trained network.\n### Feel free to use as many code cells as needed.\nResNet50 = models.resnet50(pretrained=True)\n\nif use_cuda:\n ResNet50.cuda()\n# Performance variables\nhuman_files_ResNet50 = 0\ndogs_files_ResNet50 = 0\n\nnum_files = len(human_files_short)\n\n# Preprocess definitions\ndata_transform = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n \n ])\n\n# Turn on evaluation mode\nResNet50.eval()\n\n# Test performance of ResNet50 model on human and dog files short\nfor i in range(0, num_files):\n # define the paths\n human_path = human_files_short[i]\n dog_path = dog_files_short[i]\n \n img_human = Image.open(human_path)\n img_dog = Image.open(dog_path)\n \n img_tensor_human = data_transform(img_human).unsqueeze_(0)\n img_tensor_dog = data_transform(img_dog).unsqueeze_(0)\n \n if use_cuda:\n img_tensor_human = img_tensor_human.cuda()\n img_tensor_dog = img_tensor_dog.cuda()\n \n # Get predicted category for image\n with torch.no_grad():\n output_ResNet50_human = ResNet50(img_tensor_human)\n output_ResNet50_dog = ResNet50(img_tensor_dog)\n \n prediction_human = torch.argmax(output_ResNet50_human).item()\n prediction_dog = torch.argmax(output_ResNet50_dog).item()\n \n if 151 <= prediction_human <= 268:\n human_files_ResNet50 += 1\n if 151 <= prediction_dog <= 268:\n dogs_files_ResNet50 += 1\n\nResNet50.train()\n\n\nprint('ResNet50 ')\nprint('The percentage of the detected face - Human: {0:.0%}'.format(human_files_ResNet50/num_files))\nprint('The percentage of the detected face - Dogs: {0:.0%}'.format(dogs_files_ResNet50/num_files))",
"_____no_output_____"
],
[
"InceptionV3 = models.inception_v3(pretrained=True)\n\nif use_cuda:\n InceptionV3.cuda()\n# Performance variables\nhuman_files_InceptionV3 = 0\ndogs_files_InceptionV3 = 0\n\nnum_files = len(human_files_short)\n\n# Preprocess definitions\ndata_transform = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n \n ])\n\n# Turn on evaluation mode\nInceptionV3.eval()\n\n# Test performance of ResNet50 model on human and dog files short\nfor i in range(0, num_files):\n # define the paths\n human_path = human_files_short[i]\n dog_path = dog_files_short[i]\n \n img_human = Image.open(human_path)\n img_dog = Image.open(dog_path)\n \n img_tensor_human = data_transform(img_human).unsqueeze_(0)\n img_tensor_dog = data_transform(img_dog).unsqueeze_(0)\n \n if use_cuda:\n img_tensor_human = img_tensor_human.cuda()\n img_tensor_dog = img_tensor_dog.cuda()\n \n # Get predicted category for image\n with torch.no_grad():\n output_InceptionV3_human = InceptionV3(img_tensor_human)\n output_InceptionV3_dog = InceptionV3(img_tensor_dog)\n \n prediction_human = torch.argmax(output_InceptionV3_human).item()\n prediction_dog = torch.argmax(output_InceptionV3_dog).item()\n \n if 151 <= prediction_human <= 268:\n human_files_InceptionV3 += 1\n if 151 <= prediction_dog <= 268:\n dogs_files_InceptionV3 += 1\n\nInceptionV3.train()\n\n\nprint('InceptionV3 ')\nprint('The percentage of the detected face - Human: {0:.0%}'.format(human_files_InceptionV3/num_files))\nprint('The percentage of the detected face - Dogs: {0:.0%}'.format(dogs_files_InceptionV3/num_files))",
"_____no_output_____"
]
],
[
[
"Precentage of dogs detected\n\n| model | human_files_short | dog_files_short |\n| --- | --- | --- |\n| VGG - 16 | 0.0 % | 91.0% |\n| ResNet - 50 |\t1.0 % | 95.0% |\n| Inception v3 | 1.0 % | 92.0% |\n",
"_____no_output_____"
],
[
"---\n<a id='step3'></a>\n## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)\n\nNow that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.\n\nWe mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. \n\nBrittany | Welsh Springer Spaniel\n- | - \n<img src=\"images/Brittany_02625.jpg\" width=\"100\"> | <img src=\"images/Welsh_springer_spaniel_08203.jpg\" width=\"200\">\n\nIt is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). \n\nCurly-Coated Retriever | American Water Spaniel\n- | -\n<img src=\"images/Curly-coated_retriever_03896.jpg\" width=\"200\"> | <img src=\"images/American_water_spaniel_00648.jpg\" width=\"200\">\n\n\nLikewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. \n\nYellow Labrador | Chocolate Labrador | Black Labrador\n- | -\n<img src=\"images/Labrador_retriever_06457.jpg\" width=\"150\"> | <img src=\"images/Labrador_retriever_06455.jpg\" width=\"240\"> | <img src=\"images/Labrador_retriever_06449.jpg\" width=\"220\">\n\nWe also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. \n\nRemember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!\n\n### (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset\n\nUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!",
"_____no_output_____"
]
],
[
[
"import torch\nimport torchvision.models as models\n\ntorch.cuda.empty_cache()\n# check if CUDA is available\nuse_cuda = torch.cuda.is_available()",
"_____no_output_____"
],
[
"import os\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\n\n### COMPLETED: Write data loaders for training, validation, and test sets\n## Specify appropriate transforms, and batch_sizes\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 100\n\ndata_dir = '/data/dog_images'\ntrain_dir = os.path.join(data_dir, 'train')\nvalid_dir = os.path.join(data_dir, 'valid')\ntest_dir = os.path.join(data_dir, 'test')\n\n# convert data to a normalized torch.FloatTensor\ntrain_transform = transforms.Compose([transforms.Resize(size=256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(10),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5])\n ])\n\n# convert data to a normalized torch.FloatTensor\nvalidTest_transform = transforms.Compose([transforms.Resize(size=256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5])\n ])\n\ntrain_data = datasets.ImageFolder(train_dir, transform=train_transform)\nvalid_data = datasets.ImageFolder(valid_dir, transform=validTest_transform)\ntest_data = datasets.ImageFolder(test_dir, transform=validTest_transform)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, \n num_workers=num_workers, shuffle=True \n )\n\nvalid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, \n num_workers=num_workers, shuffle=True \n )\n\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=64, \n num_workers=num_workers, shuffle=False \n )",
"_____no_output_____"
]
],
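[
[
"# optional sanity check (added): ImageFolder infers one class per breed folder,\n# and this count is what the final fully connected layer of the network must match\nprint(len(train_data.classes))\nprint(train_data.classes[:3])",
"_____no_output_____"
]
],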
[
[
"**Question 3:** Describe your chosen procedure for preprocessing the data. \n- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?\n- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?\n",
"_____no_output_____"
],
[
"**Answer**:\n\nImage Resizing\n\nI took the decision to follow the original VGG16 paper (Simonyan K, Zisserman A 2015), where the authors chose a 224x224 px image as input tensor, randomly cropped from a rescaled version of the original image.\n\nThe rescaling of the original image is necessary, because cropping a 224x224 image out of a much larger original is unlijely to contain the features we are interested in. Thus following the orinal paper I rescaled the original image to 256x256 px before cropping.\n\nData Augmentation\n\nI chose to augment the image data by random rotation up to 10 degrees and by random horizontal flipping. Data augmentation is an easy way to extend a dataset and improve generalization when training the model. ",
"_____no_output_____"
],
[
"### (IMPLEMENTATION) Model Architecture\n\nCreate a CNN to classify dog breed. Use the template in the code cell below.",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the CNN architecture\nclass Net(nn.Module):\n ### COMPLETED: choose an architecture, and complete the class\n def __init__(self):\n super(Net, self).__init__()\n ## Define layers of a CNN\n # Convolutional layers\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n self.conv3 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\n self.conv4 = nn.Conv2d(256, 512, kernel_size=3, padding=1)\n self.conv5 = nn.Conv2d(512, 512, kernel_size=3, padding= 1)\n # max pooling layer\n self.pool = nn.MaxPool2d(2, 2)\n # Linear layers\n self.fc1 = nn.Linear(25088, 512)\n self.fc2 = nn.Linear(512, 512)\n # The Last fully connected layer's output is 133(Number of breeds)\n self.fc3 = nn.Linear(512, 133)\n # dropout layer(p=0.5)\n self.dropout= nn.Dropout(0.5)\n \n def forward(self, x):\n ## Define forward behavior\n # add sequence of convolutional and max pooling layers\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = self.pool(F.relu(self.conv3(x)))\n x = self.pool(F.relu(self.conv4(x)))\n x = self.pool(F.relu(self.conv5(x)))\n # flatten image input\n x = x.view(-1, 25088)\n # add hidden layers, with relu activation function\n x = F.relu(self.fc1(x))\n # add dropout layer\n x = self.dropout(x)\n # add hidden layers, with relu activation function\n x = F.relu(self.fc2(x))\n # add dropout layer\n x = self.dropout(x)\n # add hidden layers, with relu activation function\n x = self.fc3(x)\n return x\n\n#-#-# You do NOT have to modify the code below this line. #-#-#\n\n# instantiate the CNN\nmodel_scratch = Net()\n\n# move tensors to GPU if CUDA is available\nif use_cuda:\n model_scratch.cuda()",
"_____no_output_____"
],
[
"model_scratch",
"_____no_output_____"
]
],
[
[
"__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. ",
"_____no_output_____"
],
[
"__Answer:__ \n\noutline:\n\nInput: a fixed size, 224x224 RGB image\n\nKernel Size: 3x3, the smallest size to capture the notion of left/right, up/down, center)\n\nPadding: it is 1 for 3x3 kernel, to keep the same spatial resolution\n\nMaxPooling: 2x2 with stride of 2 pixels, to reduce the size of image and the amount of parameters in half and to capture the most useful pixels(computation reduced!)\n\nActivation Function: ReLU, quick to evaluate, it does not saturate (if the input is very high or very low, the gradient is very, very small)\n\nBatch Normalization 2D: It is a technique to provide any laer in a Neural Network with inputs that are zero mean or unit variance\n\nConvolutional Layers -> (Input channels, Output channels):\n\n1 (3, 64) \n\n2 (64, 128) \n\n3 (128, 256)\n\n4 (256, 512)\n\n5 (512, 512)\n\nAfter 5 convolutions and maxpool layers we end up with 512x7x7 features maps.\n\nThen the features maps are flattened to a vector of length 516x7x7 and fed into a fully connected layers for classification. Reduced the number of nodes per layer as we have only 133 clases, not a 1000.\n\nI used Pytorch CrossEntropyLoss() class, that combines a log_sofmax output-layer activation and a negative log-likelhood loss-function.",
"_____no_output_____"
],
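[
"*Editorial note: the outline above mentions 2D batch normalization, but the `Net` model defined earlier does not actually include it. A minimal, hypothetical sketch of how the first conv block could use `nn.BatchNorm2d` (illustrative only, not part of the original model):*\n\n```python\n# in __init__: batch-norm layer over the 64 feature maps produced by conv1\nself.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\nself.bn1 = nn.BatchNorm2d(64)\n\n# in forward: conv -> batch norm -> ReLU -> max pool\nx = self.pool(F.relu(self.bn1(self.conv1(x))))\n```",
"_____no_output_____"
],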
[
"### (IMPLEMENTATION) Specify Loss Function and Optimizer\n\nUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.",
"_____no_output_____"
]
],
[
[
"import torch.optim as optim\n\n### COMPLETED: select loss function\ncriterion_scratch = nn.CrossEntropyLoss()\n\n### COMPLETED: select optimizer\noptimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Train and Validate the Model\n\nTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.",
"_____no_output_____"
]
],
[
[
"# the following import is required for training to be robust to truncated images\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ndef train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):\n \"\"\"returns trained model\"\"\"\n # initialize tracker for minimum validation loss\n valid_loss_min = np.Inf \n \n for epoch in range(1, n_epochs+1):\n # initialize variables to monitor training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n ###################\n # train the model #\n ###################\n model.train()\n for batch_idx, (data, target) in enumerate(loaders['train']):\n # move to GPU\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n ## find the loss and update the model parameters accordingly\n ## record the average training loss, using something like\n ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))\n \n # clear the gradients of all optimized variables\n optimizer.zero_grad() \n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target) \n # backward pass: compute gradiente of the loss with respect to the model parameters\n loss.backward() \n # perform a single optimization step (parameter update)\n optimizer.step() \n #update training loss\n train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss))\n \n ###################### \n # validate the model #\n ######################\n model.eval()\n for batch_idx, (data, target) in enumerate(loaders['valid']):\n # move tensors to GPU\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n ## update the average validation loss\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # update average validation loss\n valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss))\n\n \n # print training/validation statistics \n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\n epoch, \n train_loss,\n valid_loss\n ))\n \n ## TODO: save the model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min,\n valid_loss))\n torch.save(model.state_dict(), save_path)\n valid_loss_min = valid_loss\n \n # return trained model\n return model\n\n",
"_____no_output_____"
],
[
"# define loader scratch\nloaders_scratch = {'train': train_loader,\n 'valid': valid_loader,\n 'test': test_loader}\n\n# train the model\nmodel_scratch = train(5, loaders_scratch, model_scratch, optimizer_scratch, \n criterion_scratch, use_cuda, 'model_scratch.pt')\n\n# load the model that got the best validation accuracy\nmodel_scratch.load_state_dict(torch.load('model_scratch.pt'))",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Test the Model\n\nTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.",
"_____no_output_____"
]
],
[
[
"def test(loaders, model, criterion, use_cuda):\n\n # monitor test loss and accuracy\n test_loss = 0.\n correct = 0.\n total = 0.\n\n model.eval()\n for batch_idx, (data, target) in enumerate(loaders['test']):\n # move to GPU\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update average test loss \n test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))\n # convert output probabilities to predicted class\n pred = output.data.max(1, keepdim=True)[1]\n # compare predictions to true label\n correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())\n total += data.size(0)\n \n print('Test Loss: {:.6f}\\n'.format(test_loss))\n\n print('\\nTest Accuracy: %2d%% (%2d/%2d)' % (\n 100. * correct / total, correct, total))\n\n",
"_____no_output_____"
],
[
"# call test function \ntest(loaders_scratch, model_scratch, criterion_scratch, use_cuda)",
"_____no_output_____"
]
],
[
[
"---\n<a id='step4'></a>\n## Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)\n\nYou will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.\n\n### (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset\n\nUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). \n\nIf you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms, models",
"_____no_output_____"
],
[
"## COMPLETED: Specify data loaders\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 128\n\ndata_dir = 'dogImages'\ntrain_dir = os.path.join(data_dir, 'train')\nvalid_dir = os.path.join(data_dir, 'valid')\ntest_dir = os.path.join(data_dir, 'test')\n\n# convert data to a normalized torch.FloatTensor\ntrain_transform = transforms.Compose([transforms.Resize(size=256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(10),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\n# convert data to a normalized torch.FloatTensor\nvalidTest_transform = transforms.Compose([transforms.Resize(size=256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\ntrain_data = datasets.ImageFolder(train_dir, transform=train_transform)\nvalid_data = datasets.ImageFolder(valid_dir, transform=validTest_transform)\ntest_data = datasets.ImageFolder(test_dir, transform=validTest_transform)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, \n num_workers=num_workers, shuffle=True \n )\n\nvalid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, \n num_workers=num_workers, shuffle=True \n )\n\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=16, \n num_workers=num_workers, shuffle=False \n )",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Model Architecture\n\nUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.",
"_____no_output_____"
]
],
[
[
"import torchvision.models as models\nimport torch.nn as nn\n\n## COMPLETED: Specify model architecture \n#Check if CUDA is available\nuse_cuda = torch.cuda.is_available()\n#Load the pretrained model from pytorch\nmodel_transfer = models.vgg16(pretrained=True)\n# Freeze training for all \"features\" layers\nfor param in model_transfer.features.parameters():\n param.requires_grad = False\n# new layer automatically have required_grad = True\nn_inputs = model_transfer.classifier[6].in_features\n\nlast_layer = nn.Linear(n_inputs, 133)\n\nmodel_transfer.classifier[6] = last_layer\n\nif use_cuda:\n model_transfer = model_transfer.cuda()",
"_____no_output_____"
],
[
"model_transfer",
"_____no_output_____"
]
],
[
[
"__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.",
"_____no_output_____"
],
[
"__Answer:__ \n\nI choose the VGG16 model because we have already worked on this model in previous exercises and it seems to work correct with large data.\n\nNext, freeze all the parameters, so the net acts as a fized feature extractor. Remove the last layer because we won't need 1000 classes, add a new layer to the last layer with the correct classes (133).\n",
"_____no_output_____"
],
[
"### (IMPLEMENTATION) Specify Loss Function and Optimizer\n\nUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.",
"_____no_output_____"
]
],
[
[
"criterion_transfer = nn.CrossEntropyLoss()\noptimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr= 0.001)",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Train and Validate the Model\n\nTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.",
"_____no_output_____"
]
],
[
[
"n_epochs=7\n\n# define loader scratch\nloaders_transfer = {'train': train_loader,\n 'valid': valid_loader,\n 'test': test_loader}\n\n# train the model\nmodel_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')\n\n# load the model that got the best validation accuracy (uncomment the line below)\nmodel_transfer.load_state_dict(torch.load('model_transfer.pt'))",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Test the Model\n\nTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.",
"_____no_output_____"
]
],
[
[
"test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Predict Dog Breed with the Model\n\nWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ",
"_____no_output_____"
]
],
[
[
"### COMPLETED: Write a function that takes a path to an image as input\n### and returns the dog breed that is predicted by the model.\n\n# list of class names by index, i.e. a name can be accessed like class_names[0]\n#class_names = [item[4:].replace(\"_\", \" \") for item in data_transfer['train'].classes]\nclass_names = [item[4:].replace(\"_\", \" \") for item in train_data.classes ]\n\nmodel_transfer.load_state_dict(torch.load('model_transfer.pt'))\n\ndef predict_breed_transfer(img_path):\n # load the image and return the predicted breed\n img = Image.open(img_path)\n \n # define normalization step for image\n normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std =(0.229, 0.224, 0.225))\n \n # Define transformation of image\n data_transform = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transform.ToTensor(),\n normalize])\n \n # Preprocess image to 4D Tensor(.unsqueeze_(0) add a dimension)\n img_tensor = data_transform(img).unsqueeze_(0)\n \n # Move tensor to GPU if available\n if use_cuda:\n img_tensor = img_tensor.cuda()\n \n # Turn on evaluation mode\n model_transfer.eval()\n \n # Get predicted category for image\n with torch.no_grad():\n output = model_transfer(img_tensor)\n prediction = torch.argmax(output).item()\n \n # Turn off evaluation mode\n mode_transfer.train()\n \n # Use prediction to get dog breed\n breed = class_names[prediction]\n \n return breed, prediction",
"_____no_output_____"
]
],
[
[
"---\n<a id='step5'></a>\n## Step 5: Write your Algorithm\n\nWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,\n- if a __dog__ is detected in the image, return the predicted breed.\n- if a __human__ is detected in the image, return the resembling dog breed.\n- if __neither__ is detected in the image, provide output that indicates an error.\n\nYou are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. \n\nSome sample output for our algorithm is provided below, but feel free to design your own user experience!\n\n\n\n\n### (IMPLEMENTATION) Write your Algorithm",
"_____no_output_____"
]
],
[
[
"### COMPLETED: Write your algorithm.\n### Feel free to use as many code cells as needed.\n\ndef run_app(img_path):\n ## handle cases for a human face, dog, and neither\n breed, prediction = predict_breed_transfer(img_path)\n \n #if it's a dog\n if dog_detector(img_path):\n plt.imshow(Image.open(img_path))\n plt.show()\n print(f'This is a picture of a ... {breed}, with prediction {prediction}% ')\n print('\\n-----------------------------------\\n')\n elif face_detector(img_path):\n print('Hello Human!')\n plt.imshow(Image.open(img_path))\n plt.show()\n print(f'This is a picture of a ... {breed}, with prediction {prediction}% ')\n print('\\n-----------------------------------\\n') \n else:\n plt.imshow(Image.open(img_path))\n plt.show()\n print('Sorry, I did not detect a human or a dog in this image')\n print('\\n-----------------------------------\\n')",
"_____no_output_____"
]
],
[
[
"---\n<a id='step6'></a>\n## Step 6: Test Your Algorithm\n\nIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?\n\n### (IMPLEMENTATION) Test Your Algorithm on Sample Images!\n\nTest your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. \n\n__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.",
"_____no_output_____"
],
[
"__Answer:__ (Three possible points for improvement)\n\nThe output is better than I expected. \n\nFirst thing, I must improve is the face_detector algoritm. I would build a new neural network using transfer learning and the VGG16 network\n\nSecond thing, use also transfer learning on the dog detector to increase accuracy.\n\nFinally, I would improve predict_breed_transfer, by increasing the number of epochs or use other networks like InceptronV3 or ResNet50 to train the model. ",
"_____no_output_____"
]
],
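[
[
"*Editorial sketch (not part of the original submission): a minimal, hypothetical illustration of the third improvement above, swapping the VGG16 backbone for ResNet50 via transfer learning. It assumes the 133 breed classes, the `use_cuda` flag and the `optim` import defined earlier; `model_resnet` and `optimizer_resnet` are illustrative names.*\n\n```python\nimport torch.nn as nn\nimport torchvision.models as models\n\n# load a ResNet50 pretrained on ImageNet and freeze its convolutional backbone\nmodel_resnet = models.resnet50(pretrained=True)\nfor param in model_resnet.parameters():\n    param.requires_grad = False\n\n# replace the final fully connected layer with a new 133-class head\nmodel_resnet.fc = nn.Linear(model_resnet.fc.in_features, 133)\n\n# only the new head's parameters are optimized\noptimizer_resnet = optim.SGD(model_resnet.fc.parameters(), lr=0.001)\n\nif use_cuda:\n    model_resnet = model_resnet.cuda()\n```",
"_____no_output_____"
]
],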
[
[
"## COMPLETED: Execute your algorithm from Step 6 on\n## at least 6 images on your computer.\n## Feel free to use as many code cells as needed.\n\n## suggested code, below\nfor file in np.hstack((human_files[:3], dog_files[:3])):\n run_app(file)",
"_____no_output_____"
],
[
"import numpy as np\nfrom glob import glob\n\n# load filenames\nfiles = np.array(glob(\"my_images/*\"))\nfor file_path in files:\n run_app(file_path)\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e7518f8bb86a7f1115bec65c205915fc26844d79 | 4,418 | ipynb | Jupyter Notebook | starter_code/VacationPy.ipynb | DiamondN97/python-api-challenge | 4e206f6c80ba5e27f4b8f6dd27c6eaf8e1f13752 | [
"ADSL"
] | null | null | null | starter_code/VacationPy.ipynb | DiamondN97/python-api-challenge | 4e206f6c80ba5e27f4b8f6dd27c6eaf8e1f13752 | [
"ADSL"
] | null | null | null | starter_code/VacationPy.ipynb | DiamondN97/python-api-challenge | 4e206f6c80ba5e27f4b8f6dd27c6eaf8e1f13752 | [
"ADSL"
] | null | null | null | 22.313131 | 156 | 0.54278 | [
[
[
"# VacationPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\napi_key = \"c280ce5700cf1679dcce087b5b74f838\"",
"_____no_output_____"
]
],
[
[
"### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame",
"_____no_output_____"
]
],
[
[
"weather_data = pd.read_csv('')\n",
"_____no_output_____"
]
],
[
[
"### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.",
"_____no_output_____"
],
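[
"*Editorial sketch (not part of the original starter code): a minimal, hypothetical illustration of the heatmap steps described above. It assumes the cleaned `weather_data` DataFrame contains `Lat`, `Lng` and `Humidity` columns and reuses the `api_key` defined earlier.*\n\n```python\n# configure gmaps with the API key loaded above\ngmaps.configure(api_key=api_key)\n\n# use latitude/longitude as locations and humidity as the heatmap weight\nlocations = weather_data[[\"Lat\", \"Lng\"]]\nhumidity = weather_data[\"Humidity\"]\n\nfig = gmaps.figure()\nheat_layer = gmaps.heatmap_layer(locations, weights=humidity,\n                                 dissipating=False,\n                                 max_intensity=humidity.max(),\n                                 point_radius=3)\nfig.add_layer(heat_layer)\nfig\n```",
"_____no_output_____"
],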
[
"### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.",
"_____no_output_____"
],
[
"### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.",
"_____no_output_____"
]
],
[
[
"# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel Name}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]\nlocations = hotel_df[[\"Lat\", \"Lng\"]]",
"_____no_output_____"
],
[
"# Add marker layer ontop of heat map\n\n\n# Display figure\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e75196e1cc355e6ab41d007b1a5bf4291b6c5307 | 1,433 | ipynb | Jupyter Notebook | pra_matplotlib/plot_2_figure.ipynb | RayleighChen/practices | e3ac6e13b3f12b61ebea7f766aa78429ea91f456 | [
"Apache-2.0"
] | null | null | null | pra_matplotlib/plot_2_figure.ipynb | RayleighChen/practices | e3ac6e13b3f12b61ebea7f766aa78429ea91f456 | [
"Apache-2.0"
] | null | null | null | pra_matplotlib/plot_2_figure.ipynb | RayleighChen/practices | e3ac6e13b3f12b61ebea7f766aa78429ea91f456 | [
"Apache-2.0"
] | null | null | null | 17.691358 | 50 | 0.482903 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\nx = np.linspace(-3, 3, 50)\n\ny1 = x*2 - 1\ny2 = x ** 2\n\nplt.figure()\nplt.plot(x, y1)",
"_____no_output_____"
],
[
"plt.figure()\nplt.plot(x, y2)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7519a43f6132aca73e7449f0b6fd6f15d18bfdc | 685,872 | ipynb | Jupyter Notebook | Exercicios_Steepest_Descent.ipynb | marianasmoura/tecnicas-de-otimizacao | 755153b20e2100237904af2835d2e850d2daa0a0 | [
"MIT"
] | null | null | null | Exercicios_Steepest_Descent.ipynb | marianasmoura/tecnicas-de-otimizacao | 755153b20e2100237904af2835d2e850d2daa0a0 | [
"MIT"
] | null | null | null | Exercicios_Steepest_Descent.ipynb | marianasmoura/tecnicas-de-otimizacao | 755153b20e2100237904af2835d2e850d2daa0a0 | [
"MIT"
] | null | null | null | 113.086892 | 1,259 | 0.61072 | [
[
[
"<a href=\"https://colab.research.google.com/github/marianasmoura/tecnicas-de-otimizacao/blob/main/Exercicios_Steepest_Descent.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"\nUNIVERSIDADE FEDERAL DO PIAUÍ\n\nCURSO DE GRADUAÇÃO EM ENGENHARIA ELÉTRICA\n\nDISCIPLINA: TÉCNICAS DE OTIMIZAÇÃO\n\nDOCENTE: ALDIR SILVA SOUSA\n\nDISCENTE: MARIANA DE SOUSA MOURA\n\nAtividade 3: Otimização Irrestrita pelo Método de Newton Multivariável\n\n\nResolva os exercícios usando o método de Descida Gradiente.",
"_____no_output_____"
],
[
"# **Método da Descida Gradiente - Multivariável**",
"_____no_output_____"
],
[
"#**Exercícios**",
"_____no_output_____"
],
[
"Trecho responsável pela busca do lambda através do método da bisseção",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport sympy as sym #Para criar variáveis simbólicas.\n\nclass Params:\n def __init__(self,f,vars,eps,a,b):\n self.f = f\n self.a = a\n self.b = b \n self.vars = vars #variáveis simbólicas\n self.eps = eps\n\ndef eval(sym_f,vars,x):\n map = dict()\n map[vars[0]] = x\n return sym_f.subs(map) \n\nimport pandas as pd\nimport math\n\ndef bissecao(params):\n n = math.ceil( -math.log(params.eps/(params.b-params.a),2) )\n f = params.f\n diff = sym.diff(f) #retorna a derivada simbólica de f\n \n a = params.a\n b = params.b\n\n for k in range(n):\n x = (b + a)/2\n \n fx = eval(f,params.vars,x) #Não é necessário. Somente para debug\n dfx = eval(diff,params.vars,x) \n\n if (dfx == 0): break # Mínimo encontrado. Parar.\n if (dfx > 0 ):\n #Passo 2\n b = x\n else:\n #Passo 3\n a = x \n x = (a+b)/2 \n return x\n",
"_____no_output_____"
]
],
[
[
"Calcula o gradiente e permite a substituição de valores em variáveis nas funções",
"_____no_output_____"
]
],
[
[
"# Função para o cálculo do gradiente \nimport sympy as sym #Para criar variáveis simbólicas.\ndef gradiente_simbolico(funcao,variaveis):\n g1 = [sym.diff(funcao,x) for x in variaveis]\n return g1\n\n# Função para substituição dos valores nas variáveis simbólicas\ndef eval_simbolica(f,variaveis,x):\n mp = dict()\n for i in range(len(variaveis)):\n mp[variaveis[i]] = x[i]\n return float(f.subs(mp))\n\ndef eval_gradiente(grad_simb,variaveis,x):\n g2 = [ eval_simbolica(f,variaveis,x) for f in grad_simb]\n return g2\n\n# Função para substituição da expressão com o lambda nas variáveis simbólicas de uma função\ndef eval_lambda(f,variaveis,x):\n mp = dict()\n for i in range(len(variaveis)):\n mp[variaveis[i]] = x[i]\n return f.subs(mp)",
"_____no_output_____"
]
],
[
[
"Parâmetros que serõ utilizados pela função",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport sympy as sym \n\nclass Parametros:\n def __init__(self,f,d1f,vars,m,eps,nmax):\n self.f = f\n self.d1f = d1f\n self.m = m\n self.eps = eps\n self.nmax = nmax\n self.vars = vars",
"_____no_output_____"
]
],
[
[
"Código para a otimização a partir da Descida Gradiente",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport math\n\nlmbd = sym.Symbol('lmbd')\n\ndef steepestDescent(p):\n f = p.f\n d1f = p.d1f\n m = p.m\n eps = p.eps\n vars= p.vars\n nmax = p.nmax\n k = 0\n v = [0 for i in vars]\n \n cols = ['x','grad(x)','lambda'] \n table = pd.DataFrame([], columns=cols)\n\n\n g = eval_gradiente(d1f,vars,m)\n tolerancia = np.linalg.norm(g) # avalia o valor do módulo do gradiente no ponto\n \n while ((tolerancia > eps) & (k < nmax)):\n #while (tolerancia > eps):\n d = eval_gradiente(d1f,vars,m)\n \n for j in range(0,len(m)):\n v[j] = m[j] - lmbd*d[j]\n\n param = Params(f(v),[lmbd],eps,0,3)\n lmb = bissecao(param)\n \n for j in range(0,len(m)):\n m[j] = m[j] - lmb*d[j]\n valor = np.array(m)\n k = k + 1\n \n row = pd.DataFrame([[valor,d,lmb]],columns=cols)\n table = table.append(row, ignore_index=True) # concatena valores de cada iteração\n \n\n g = eval_gradiente(d1f,vars,m)\n tolerancia = np.linalg.norm(g) # calcula o valor da incertexa critério de parada\n print(k,'Derivada: ', g)\n print(k,'Solução: ', m)\n print(k,'Tolerância: ', tolerancia)\n print('=====================================================================')\n return m,table",
"_____no_output_____"
]
],
[
[
"**1.** Considere o seguinte problema:\n\nMinimizar $\\sum_{i=2}^{n} [100(x_i-x^2_{i-1})^2 + (1-x_{i-1})^2]$\n\nResolva para n = 5, 10, e 50. Iniciando do ponto $x_0 = [-1.2,1.0,-1.2,1.0,...]$",
"_____no_output_____"
]
],
[
[
"# Para n = 5\n\nimport numpy as np\nimport sympy as sym \n\nvariaveis = list(sym.symbols(\"x:5\"))\n\nc = variaveis\n\ndef f1(c):\n fo = 0\n for i in range(1,5):\n fo = fo + 100*(c[i] - c[i-1]**2)**2 + (1 - c[i-1])**2\n return fo\n\nx = []\nfor i in range(1,6):\n if (i%2 != 0):\n x.append(-1.2)\n else: \n x.append(1)\n\neps = 1e-3\nnmax = 500\nd1f = gradiente_simbolico(f1(c),c)\np = Parametros(f1,d1f,c,x,eps,nmax)\nm,df = steepestDescent(p)",
"1 Derivada: [-311.2609414050821, -135.48916022479534, -121.52078589354642, 16.313055351376534, -146.69422149658203]\n1 Solução: [-0.963134765625, 0.1298828125, -0.479736328125, 0.1298828125, -0.7166015625]\n1 Tolerância: 389.60659386407815\n=====================================================================\n2 Derivada: [32.344427502331506, 103.91754146603932, -79.04925388766392, 23.2919039382602, -91.59979279350011]\n2 Solução: [-0.39319895982956155, 0.37797087443505006, -0.25722512348593796, 0.10001271603141504, -0.44799642059952016]\n2 Tolerância: 164.39840870238746\n=====================================================================\n3 Derivada: [-24.87305260419642, -21.80554405653276, -14.701118422485173, 9.008686649215523, -42.96138972767231]\n3 Solução: [-0.47611314165926877, 0.11158069245423635, -0.054584213861799485, 0.040304466189683565, -0.21318249864352623]\n3 Tolerância: 56.89561338803853\n=====================================================================\n4 Derivada: [5.985546417202201, 12.822683801661958, -9.024512395677563, -1.1288998252196762, -8.02727533787585]\n4 Solução: [-0.3759165186199033, 0.19942040850228093, 0.004636600095965497, 0.004014590771701111, -0.040120259750315024]\n4 Tolerância: 18.638706337486916\n=====================================================================\n5 Derivada: [-2.6925744645993923, -2.169561762677032, -0.08500268109835288, -0.6314059766238522, -2.7446021672270504]\n5 Solução: [-0.39564427170394373, 0.15715814499582673, 0.03438047640007856, 0.007735329941736665, -0.013663175506827725]\n5 Tolerância: 4.4604591141429175\n=====================================================================\n6 Derivada: [-0.02544129667467132, 1.4196666158318285, -0.616412134359795, -0.15365778473212016, -0.5425386610653766]\n6 Solução: [-0.3847977192952792, 0.16589783471364192, 0.03472289442696402, 0.01027883546280443, -0.002607038846855477]\n6 Tolerância: 1.6474293285689914\n=====================================================================\n7 Derivada: [-0.8938411561466069, 0.018122440830740327, 0.2425677762588164, -0.07314284526022494, -0.10805635610448602]\n7 Solução: [-0.38469523360310826, 0.16017896284810842, 0.03720599994867706, 0.010897818238214582, -0.00042151933816926774]\n7 Tolerância: 0.9354921770221568\n=====================================================================\n8 Derivada: [-0.0731738652177647, 1.2628102568973434, -0.1633354232699557, 0.07688013495202196, 0.07121073134585747]\n8 Solução: [-0.377166542224432, 0.1600263202190409, 0.03516288757540333, 0.011513889566700363, 0.0004886233096834588]\n8 Tolerância: 1.2797281521400827\n=====================================================================\n9 Derivada: [-0.9415229863531671, -0.040303541668202936, 0.37419645772408683, -0.00658427890844789, 0.00506641598697145]\n9 Solução: [-0.3768181803172674, 0.154014406154222, 0.03594048541567778, 0.011147883064853578, 0.00014960737676250645]\n9 Tolerância: 1.0139930569660418\n=====================================================================\n10 Derivada: [-0.11709805114856309, 1.2852205938869674, -0.21816978312546603, 0.04353782575427884, -0.0029524733840692145]\n10 Solução: [-0.36957746399155333, 0.15432435770757852, 0.03306275386823912, 0.011198518998841885, 0.0001106444608470766]\n10 Tolerância: 1.3095824686416153\n=====================================================================\n11 Derivada: [-0.9623157101786788, -0.017065580219852716, 0.3614304183568914, -0.012366445348026623, 0.0007785845704430215]\n11 Solução: 
[-0.3690199903593763, 0.14820575380601703, 0.03410140395880225, 0.010991246635021466, 0.0001247004254440858]\n11 Tolerância: 1.0281674195203092\n=====================================================================\n12 Derivada: [-0.11022490475755475, 1.3777368709439, -0.2538286127015774, 0.04838527156347659, -0.000993108299108321]\n12 Solução: [-0.36091454797627953, 0.14834949465503677, 0.031057129194809876, 0.011095407368543662, 0.0001181425231783914]\n12 Tolerância: 1.4060865907124898\n=====================================================================\n13 Derivada: [-1.0039345267297612, -0.024217242928426863, 0.36994282832861547, -0.013492361235450974, 0.0009641959729733106]\n13 Solução: [-0.36038979562208706, 0.1417904446102599, 0.032265541779692875, 0.010865057564957776, 0.00012287045575471274]\n13 Tolerância: 1.070285792922863\n=====================================================================\n14 Derivada: [-0.15065537897458015, 1.4009427272228974, -0.26237287966869316, 0.04778367646544542, -0.001156543055734191]\n14 Solução: [-0.3519338041249736, 0.14199442285269906, 0.02914956825006953, 0.01097870172087161, 0.00011474917619719829]\n14 Tolerância: 1.4340368591799655\n=====================================================================\n15 Derivada: [-1.0205532450449724, 0.01080724429674862, 0.3584706964610509, -0.013090125435630874, 0.0009333069944189656]\n15 Solução: [-0.3512165727065155, 0.13532489570893771, 0.030398657887164136, 0.010751215956448712, 0.00012025517951429222]\n15 Tolerância: 1.0818129159740377\n=====================================================================\n16 Derivada: [-0.10998298660656758, 1.5771580294283198, -0.34136343314823475, 0.053693052610886124, -0.0014723115222813367]\n16 Solução: [-0.3411256531693277, 0.1352180369701637, 0.026854208862097592, 0.010880647128358636, 0.00011102692432045235]\n16 Tolerância: 1.6183132614619966\n=====================================================================\n17 Derivada: [-1.087208840164621, -0.02628407696854529, 0.3790830154459162, -0.015980839943207883, 0.0010289985020052565]\n17 Solução: [-0.3406020525250826, 0.12770959420310996, 0.02847935215955623, 0.010625028347618333, 0.00011803621989771945]\n17 Tolerância: 1.1518134803504276\n=====================================================================\n18 Derivada: [-0.1307811587003087, 1.6616897221636229, -0.38017666407790446, 0.05588536240126998, -0.001682439810895834]\n18 Solução: [-0.3298520627724783, 0.12796948314774523, 0.024731094804683277, 0.010783042023814603, 0.00010786179623287255]\n18 Tolerância: 1.710548667667499\n=====================================================================\n19 Derivada: [-1.122066515985754, -0.008945067768165504, 0.37429766229440364, -0.016476055888828826, 0.0010528918476214733]\n19 Solução: [-0.3292294473929236, 0.12005860678490571, 0.02654101788806197, 0.010516986221367151, 0.00011587145841853388]\n19 Tolerância: 1.182998135895292\n=====================================================================\n20 Derivada: [-0.16388959195797703, 1.742830392769113, -0.4209725303647573, 0.05797361467014495, -0.001925693538856483]\n20 Solução: [-0.31731296950098503, 0.12015360445285375, 0.022565932851488106, 0.010691963865694312, 0.00010468962361103044]\n20 Tolerância: 1.8013606235085542\n=====================================================================\n21 Derivada: [-1.1563178358552655, 0.016890820812165197, 0.3643471401600201, -0.016734845819914874, 0.0010729994714376162]\n21 Solução: [-0.31653273340645854, 
0.11185643827634065, 0.02457007453657423, 0.010415966432572284, 0.00011385735408166067]\n21 Tolerância: 1.2125948549618626\n=====================================================================\n22 Derivada: [-0.11611741246096585, 2.0131809033798316, -0.5543422066481393, 0.06820568363420354, -0.002580488420384438]\n22 Solução: [-0.3017117650513801, 0.11163994215997379, 0.01990009776450366, 0.010630463357754298, 0.00010010430909863459]\n22 Tolerância: 2.0924470266847597\n=====================================================================\n23 Derivada: [-1.233973548473335, -0.019012673780795075, 0.3815073178323849, -0.019866145096879954, 0.0012361580453422866]\n23 Solução: [-0.30115895998717385, 0.10205570690218406, 0.0225391780940053, 0.010305753682249667, 0.00011238934918591401]\n23 Tolerância: 1.2918963679272237\n=====================================================================\n24 Derivada: [-0.18762077286039514, 2.086422240005907, -0.600238472734489, 0.07096194448641605, -0.0029953414350544286]\n24 Solução: [-0.2853426486358921, 0.1022993996203217, 0.017649252755577514, 0.010560385668965047, 9.65450383020102e-05]\n24 Tolerância: 2.180295877119946\n=====================================================================\n25 Derivada: [-1.2655515089156832, 0.033287760365638186, 0.3556635818265716, -0.019175884623895367, 0.001260894673409032]\n25 Solução: [-0.28444943450728427, 0.09236648124138733, 0.020506833375285166, 0.010222554146141533, 0.0001108050866378406]\n25 Tolerância: 1.3151405189382295\n=====================================================================\n26 Derivada: [-0.1721109181314855, 2.4226171639401866, -0.7881601322173531, 0.08597921763983583, -0.004207025422521448]\n26 Solução: [-0.26359378830322944, 0.09181791585450243, 0.014645678157195911, 0.010538562986989419, 9.00261827181361e-05]\n26 Tolerância: 2.5548586282030157\n=====================================================================\n27 Derivada: [-1.3447385558835983, 0.023548632664408098, 0.355206218423325, -0.021620686811852707, 0.0014906563356993224]\n27 Solução: [-0.2627744125943515, 0.08028446011797077, 0.018397905349148643, 0.010129238098323208, 0.00011005474613101896]\n27 Tolerância: 1.3912287574447972\n=====================================================================\n28 Derivada: [-0.21900325865383596, 2.7583600496911367, -0.9986779653248755, 0.10453381619425409, -0.006031299071083674]\n28 Solução: [-0.2366741403304975, 0.07982740047519235, 0.011503644029652954, 0.010548877893621618, 8.11223294591205e-05]\n28 Tolerância: 2.943609003375812\n=====================================================================\n29 Derivada: [-1.4181461160386029, 0.052995798526013105, 0.33082863650962335, -0.022554221307600993, 0.0017617558148132935]\n29 Solução: [-0.2356315222778242, 0.06669555941831316, 0.01625809235090175, 0.010051219344845262, 0.0001098357893922581]\n29 Tolerância: 1.4573628046968756\n=====================================================================\n30 Derivada: [-3.596090865094121, 6.532339024279577, -16.462209656187177, 0.2368645560845063, -0.12121687729318623]\n30 Solução: [0.13569702006870937, 0.05281909605914591, -0.07036634921564391, 0.01595683515353717, -0.0003514637983487715]\n30 Tolerância: 18.074248746536366\n=====================================================================\n31 Derivada: [-1.5945612250410455, -2.3485668428082205, -0.5244287169773665, 0.9826421245454924, 0.001142760615129211]\n31 Solução: [0.15281708156024632, 0.021720314083205544, 0.008005986794036246, 
0.014829184068662203, 0.00022561850321791094]\n31 Tolerância: 3.0494268388610677\n=====================================================================\n32 Derivada: [-2.8723050985392207, 1.4499386124772344, 0.2839155586381968, -1.149731253734268, 0.038836040814193895]\n32 Solução: [0.1697515086328258, 0.046662369176505736, 0.01357548122482615, 0.004393409552615104, 0.00021348225156797912]\n32 Tolerância: 3.4287689915424666\n=====================================================================\n33 Derivada: [-1.5172394239429527, -2.3311162614705285, -0.010742274375975587, 0.6489606723725274, -0.05207154660267335]\n33 Solução: [0.19184076903418942, 0.03551171822605825, 0.011392048583931814, 0.013235336918979714, -8.518358965445931e-05]\n33 Tolerância: 2.8565880505893118\n=====================================================================\n34 Derivada: [-3.2197794189385256, 1.8389718561864625, -0.5079333479853104, -0.938341766800329, 0.1030036199667783]\n34 Solução: [0.210176548205375, 0.06368316718670063, 0.011521868550145387, 0.005392672543383985, 0.000544099016994059]\n34 Tolerância: 3.859778270511116\n=====================================================================\n35 Derivada: [-1.468880919811907, -2.298824602353729, 0.35576984479202645, 0.2272282716025655, -0.04467412220052223]\n35 Solução: [0.2302215616777706, 0.05223448985936791, 0.014684051258550029, 0.011234399851345018, -9.715887098271018e-05]\n35 Tolerância: 2.7608705793730124\n=====================================================================\n36 Derivada: [-3.461828081381004, 1.9855020346229622, -1.3231933866324632, -0.34082176994782154, 0.08123839352650795]\n36 Solução: [0.24904877073297693, 0.08169940480652874, 0.010124012964706917, 0.008321928108392213, 0.00047544645507378814]\n36 Tolerância: 4.2190131415309695\n=====================================================================\n37 Derivada: [-1.3495437695586237, -2.4222489858408296, 0.44215347942772765, -0.0009834716270967856, -0.014945882135512324]\n37 Solução: [0.26806516034017236, 0.07079271638391725, 0.017392531323894032, 0.010194118006787229, 2.9190631258742012e-05]\n37 Tolerância: 2.8078964691278636\n=====================================================================\n38 Derivada: [-3.471772579626915, 1.6867292448602427, -1.4826504813028722, 0.03046244265930138, 0.01894330856336148]\n38 Solução: [0.2833859087006401, 0.09829139261477628, 0.012372966677070465, 0.010205282906850705, 0.0001988643420256666]\n38 Tolerância: 4.134948566690506\n=====================================================================\n39 Derivada: [-1.4377004203241324, -2.164328838041144, 0.2204793341611821, -0.0433742422521173, 0.0014942642405289253]\n39 Solução: [0.29991412288587566, 0.0902613095398801, 0.019431483372726228, 0.01006025907094827, 0.00010868013377724157]\n39 Tolerância: 2.6080254048389633\n=====================================================================\n40 Derivada: [-4.097884761230047, 2.4962828322870285, -2.046887399586933, 0.1267811532598702, -0.0064092334624002816]\n40 Solução: [0.32360669573057266, 0.1259283497175601, 0.015798095907911827, 0.01077504455728077, 8.405541789938452e-05]\n40 Tolerância: 5.218232465191218\n=====================================================================\n41 Derivada: [-1.346272295478144, -2.221391602169077, 0.07836488635951586, -0.04224307065981457, 0.0009034807863233206]\n41 Solução: [0.3401142881525199, 0.11587252287851323, 0.024043613997068176, 0.010264329462361858, 0.00010987386324352629]\n41 Tolerância: 
2.599029836779434\n=====================================================================\n42 Derivada: [-4.350388142901469, 2.5790322444384817, -2.149119994516966, 0.11059343482504957, -0.005029405514320448]\n42 Solução: [0.36230017192809966, 0.15247992843183664, 0.022752200464532208, 0.010960473815276282, 9.498495868375477e-05]\n42 Tolerância: 5.496203895902145\n=====================================================================\n43 Derivada: [-1.5665730245315395, -1.8118219404124214, -0.23509454860558365, -0.03764241540584296, -0.00014263848331351123]\n43 Solução: [0.3766386094108071, 0.14397970008712974, 0.029835481696460685, 0.010595969086238643, 0.00011156136845995742]\n43 Tolerância: 2.406976568958891\n=====================================================================\n44 Derivada: [-6.964927976911312, 6.822178402569541, -6.144537462149489, 0.21215367631005094, -0.009093923978829285]\n44 Solução: [0.47129847844683154, 0.25345868696507756, 0.044041023879049054, 0.012870504685102445, 0.00012018027095509757]\n44 Tolerância: 11.526177700592331\n=====================================================================\n45 Derivada: [-0.37593898135770587, -2.576113954210487, -1.2676310358825824, -0.22500052586134453, -0.0016908119413442946]\n45 Solução: [0.4891529080751442, 0.23597019252489684, 0.0597924016506725, 0.012326653708233418, 0.0001434923319359832]\n45 Tolerância: 2.904343258190724\n=====================================================================\n46 Derivada: [-4.026394437886246, 1.153195206192036, -1.3046718813099372, -0.12013238993428012, -0.008905159575911217]\n46 Solução: [0.49231938631167765, 0.2576684179692967, 0.0704694697019296, 0.014221799739047965, 0.00015773378993802866]\n46 Tolerância: 4.388438351382504\n=====================================================================\n47 Derivada: [-0.6341754707947587, -2.8910325237880263, -0.05629145421728449, -0.16515692885008623, -0.005318829023872665]\n47 Solução: [0.5055899734482656, 0.25386760369107586, 0.07476953571698922, 0.0146177438953255, 0.0001870842914699626]\n47 Tolerância: 2.964915076400198\n=====================================================================\n48 Derivada: [-3.474993054979727, 0.8596537369158632, -1.908553112345271, 0.03203197170080125, -0.004919679920677482]\n48 Solução: [0.5095380873411529, 0.2718659751160257, 0.07511998300276576, 0.015645942549055283, 0.00022019711864495112]\n48 Tolerância: 4.056871517888523\n=====================================================================\n49 Derivada: [-0.45801097589314566, -2.9137367323224073, -0.3210509889113722, -0.1863439326260562, -0.0010182333463967103]\n49 Solução: [0.5209913115213839, 0.2690326437077337, 0.0814103802236303, 0.015540368423576569, 0.00023641188400851215]\n49 Tolerância: 2.97278230532108\n=====================================================================\n50 Derivada: [-3.2519039672370837, 0.5460416297638719, -1.7691438254504495, -0.037442100259456275, -0.006472086516154961]\n50 Solução: [0.5235072409543907, 0.28503827761331335, 0.08317396597814833, 0.01656398621754294, 0.00024200520683417769]\n50 Tolerância: 3.7422392424152457\n=====================================================================\n51 Derivada: [-0.47041292873231555, -2.859940308734945, -0.377901699485024, -0.21341978023342026, -0.0030264981966996313]\n51 Solução: [0.5342251861589075, 0.28323857985896467, 0.08900488434816324, 0.016687391577284803, 0.0002633365466701376]\n51 Tolerância: 
2.9306848601999724\n=====================================================================\n52 Derivada: [-3.6134376896596265, 1.015970591522068, -2.0245655969286553, -0.029691550333013836, -0.00847999156559738]\n52 Solução: [0.5371537822728416, 0.30104338402711434, 0.09135754385423456, 0.018016054759890324, 0.00028217827128337994]\n52 Tolerância: 4.264850235890543\n=====================================================================\n53 Derivada: [-0.878251862655631, -2.3227072211007176, -0.6492920864114411, -0.20949538209338037, -0.004682004962239444]\n53 Solução: [0.5464167451003382, 0.29843896722755825, 0.09654747031120499, 0.018092168353273295, 0.0003039165309119865]\n53 Tolerância: 2.5752253530471743\n=====================================================================\n54 Derivada: [-4.946450567222726, 2.759879687605599, -2.902889765869675, 0.009087427045259343, -0.01414823612335249]\n54 Solução: [0.558316886305755, 0.32991119641971883, 0.10534525126721939, 0.020930789863571782, 0.0003673567836962368]\n54 Tolerância: 6.36485356082468\n=====================================================================\n55 Derivada: [-0.12684198358790866, -3.1004175783687753, -0.43107286397285005, -0.3205757726614378, -0.006699573816805429]\n55 Solução: [0.5709969964023953, 0.3228363095252221, 0.1127867411455474, 0.02090749445732783, 0.0004036254553991668]\n55 Tolerância: 3.149176967624902\n=====================================================================\n56 Derivada: [-2.8468468294347673, 0.13256749363131348, -1.7710814925967537, -0.1383894089721526, -0.012435325338249911]\n56 Solução: [0.5715079565413603, 0.3353257846333971, 0.11452324071965679, 0.022198876354035283, 0.00043061348469049726]\n56 Tolerância: 3.3582952416190497\n=====================================================================\n57 Derivada: [0.34804491647788893, -3.626137819676657, -0.2293956924091627, -0.3629989135461672, -0.007428930717278265]\n57 Solução: [0.5829759674509486, 0.33479176030602487, 0.12165772427137711, 0.022756353221232675, 0.00048070695834311533]\n57 Tolerância: 3.668031697670927\n=====================================================================\n58 Derivada: [-2.749008141398548, 0.03437967870975811, -1.757680312300722, -0.15791276426372833, -0.01370855783436735]\n58 Solução: [0.5818288467545493, 0.3467431422800568, 0.1224137891755577, 0.023952760773203685, 0.0005051919594864885]\n58 Tolerância: 3.2669239614212873\n=====================================================================\n59 Derivada: [0.35477244275305964, -3.590209859683977, -0.27417439535528154, -0.3863648684561261, -0.008839758675926762]\n59 Solução: [0.592902732089773, 0.3466046499219731, 0.12949428848048006, 0.024588883969090286, 0.0005604144214657515]\n59 Tolerância: 3.6386807876877603\n=====================================================================\n60 Derivada: [-2.7785212375071353, 0.09936679540112436, -1.817182772360728, -0.1757187992438775, -0.015861873107101743]\n60 Solução: [0.5917334381500351, 0.3584376169888026, 0.13039793944173403, 0.025862303335339724, 0.0005895493682736156]\n60 Tolerância: 3.3261593393728552\n=====================================================================\n61 Derivada: [-0.08674465952363164, -3.0424222113973443, -0.5260681674297834, -0.3785129704111947, -0.011464406601740168]\n61 Solução: [0.6008911619552955, 0.3581101141231007, 0.13638718930180968, 0.026441454651206998, 0.0006418284910631355]\n61 Tolerância: 
[Gradient-based solver iteration log (iterations 62–278), condensed: each step printed the gradient ("Derivada"), the current five-component solution ("Solução"), and a convergence measure ("Tolerância"), with "=====" rules between iterations. Over these iterations the solution moves from roughly [0.60, 0.37, 0.14, 0.03, 0.0007] to roughly [0.92, 0.85, 0.72, 0.51, 0.26], while the reported tolerance oscillates mostly between about 1.0 and 4.4, with occasional spikes up to about 9.6.]
0.24647438996254323, -0.692552977100803, -0.43784887829275476, -0.592531047459012]\n278 Solução: [0.9203112060908217, 0.8477218308208045, 0.7174491083856799, 0.5143527514713031, 0.26159609770880504]\n278 Tolerância: 1.1280670050049106\n=====================================================================\n279 Derivada: [0.5568286183657278, -1.256728217639818, 0.29137451613826215, -0.7947648433288563, -0.5199220869923096]\n279 Solução: [0.9214266902861581, 0.8470899994988791, 0.7192244517107674, 0.5154751668087235, 0.2631150371615198]\n279 Tolerância: 1.6959637863563084\n=====================================================================\n280 Derivada: [-0.3685105690678938, 0.14981109464656583, -0.6251954648628839, -0.45730283816456563, -0.58586970697867]\n280 Solução: [0.9208149400950825, 0.8484706823551729, 0.7189043381066116, 0.516348321543826, 0.263686240626233]\n280 Tolerância: 1.0495151129105775\n=====================================================================\n281 Derivada: [0.6412470609625984, -1.3808302495886267, 0.37993123291349207, -0.8215673097819973, -0.5114316143715101]\n281 Solução: [0.9220295135038756, 0.8479769202024071, 0.7209649188623852, 0.5178555452535969, 0.2656172076780426]\n281 Tolerância: 1.8435761046933763\n=====================================================================\n282 Derivada: [-0.39842608464596196, 0.20103085927425468, -0.6532953632655563, -0.44042076051287893, -0.5861861740771133]\n282 Solução: [0.9213250184417829, 0.8494939456230978, 0.7205475139434129, 0.5187581460578008, 0.26617908323094097]\n282 Tolerância: 1.0786733796386365\n=====================================================================\n283 Derivada: [0.4882051771214151, -1.145857884338369, 0.2322017257661173, -0.7629133398987378, -0.5201791027809932]\n283 Solução: [0.9223463743716616, 0.848978607727009, 0.722222221295534, 0.519887154355014, 0.2676817577494492]\n283 Tolerância: 1.5677612352277386\n=====================================================================\n284 Derivada: [-0.3421353539779943, 0.11978538928730131, -0.5957701363065837, -0.4564127888037177, -0.5803222251576017]\n284 Solução: [0.9218100161448436, 0.850237484797205, 0.7219671168604882, 0.5207253159833207, 0.2682532435801412]\n284 Tolerância: 1.0155952510909945\n=====================================================================\n285 Derivada: [0.7775368071574462, -1.5810056502919565, 0.5266001384816832, -0.864811118298519, -0.4964109532433696]\n285 Solução: [0.9231882469408975, 0.8497549508803982, 0.7243670698021529, 0.5225638928913435, 0.27059096738753874]\n285 Tolerância: 2.0918346405539188\n=====================================================================\n286 Derivada: [-0.44747279576171195, 0.2870724253293986, -0.6993875715698721, -0.41046071779027216, -0.5861140678361352]\n286 Solução: [0.9223340194916279, 0.8514918955645568, 0.7237885296109499, 0.5235140027625211, 0.2711363407492563]\n286 Tolerância: 1.1330459641877006\n=====================================================================\n287 Derivada: [0.3063650531558437, -0.8623025865627767, 0.05860579538705224, -0.6887651192051365, -0.5289698874008408]\n287 Solução: [0.9231533666518361, 0.8509662502545055, 0.7250691465022521, 0.5242655787838656, 0.27220954766057737]\n287 Tolerância: 1.262959408315054\n=====================================================================\n288 Derivada: [-0.6598220780803103, 0.6169731196304156, -0.9119179844613541, -0.32684771478805885, -0.6000479322843617]\n288 Solução: [0.9225923954851689, 
0.8525451734476746, 0.7249618360858939, 0.5255267453839727, 0.27317812045244905]\n288 Tolerância: 1.454132856775363\n=====================================================================\n289 Derivada: [0.08576847325031167, -0.5231216783220134, -0.16062572124815233, -0.6038609127499086, -0.543710891036298]\n289 Solução: [0.9233172976705599, 0.8518673465340181, 0.7259636991059321, 0.5258858310081295, 0.27383735279992943]\n289 Tolerância: 0.9834036193950357\n=====================================================================\n290 Derivada: [-0.9285088424534251, 1.0367868731715362, -1.1824030210162846, -0.21875001901356939, -0.6185424384882765]\n290 Solução: [0.9229717947875936, 0.8539746482167118, 0.7266107509615459, 0.5283183761888847, 0.27602759442641844]\n290 Tolerância: 1.9405099623637359\n=====================================================================\n291 Derivada: [0.19000991260435285, -0.6776824766728851, -0.05118775533830444, -0.6377716283911852, -0.5334312320042969]\n291 Solução: [0.9239918850686093, 0.8528356001382762, 0.7279097777180336, 0.528558702137508, 0.2767071454452654]\n291 Tolerância: 1.0905407015019426\n=====================================================================\n292 Derivada: [-0.7852991041740438, 0.8218725900666755, -1.0383206687925224, -0.2668883039587939, -0.6061370450248589]\n292 Solução: [0.9235047991110054, 0.854572823284044, 0.7280409963291303, 0.5301936147591163, 0.2780745839062139]\n292 Tolerância: 1.6759804690695779\n=====================================================================\n293 Derivada: [0.13916720763251078, -0.5964025227588081, -0.10014797016503962, -0.6150760661742325, -0.5351535350659731]\n293 Solução: [0.9243675544744778, 0.8536698870889025, 0.7291817294857628, 0.5304868270071179, 0.27874050595275]\n293 Tolerância: 1.024597955314891\n=====================================================================\n294 Derivada: [-0.6704547744564024, 0.6499531339001976, -0.921481036908375, -0.3051882871780123, -0.5958544950252929]\n294 Solução: [0.9240108025994433, 0.8551987509778105, 0.7294384564600628, 0.5320635600869102, 0.2801123594972307]\n294 Tolerância: 1.4728402670053202\n=====================================================================\n295 Derivada: [0.09845051839488406, -0.530479349724061, -0.138950369860936, -0.5960522915452628, -0.5363101269086386]\n295 Solução: [0.9247473862139585, 0.8544846911383206, 0.7304508257633069, 0.5323988499531946, 0.28076698479694107]\n295 Tolerância: 0.9763784803146237\n=====================================================================\n296 Derivada: [-0.9637058225443411, 1.1087003898881278, -1.2198228209397826, -0.18606527023756314, -0.6167123042461498]\n296 Solução: [0.9243507959909322, 0.8566216318781759, 0.7310105623606471, 0.5347999395065386, 0.2829274137749666]\n296 Tolerância: 2.015159431019445\n=====================================================================\n297 Derivada: [0.21330829490960923, -0.7024623648831891, -0.01766945306286516, -0.6349373461347483, -0.5249415545250429]\n297 Solução: [0.9254095548291766, 0.8554035772506132, 0.7323506997371678, 0.5350043569176882, 0.2836049541482839]\n297 Tolerância: 1.1036190835201583\n=====================================================================\n298 Derivada: [-0.5309658560626076, 0.44512589268373404, -0.778257690409788, -0.3474071970274153, -0.5817720427749649]\n298 Solução: [0.9250189756758997, 0.8566898242566249, 0.7323830534720241, 0.5361669619215971, 0.28456615084236053]\n298 Tolerância: 
1.2429389782385807\n=====================================================================\n299 Derivada: [0.43903637980298527, -1.0456554827545403, 0.2154503444541831, -0.718221928895133, -0.5052286369639489]\n299 Solução: [0.9259912031955456, 0.8558747744042987, 0.7338080858641319, 0.5368030834981854, 0.2856314072683401]\n299 Tolerância: 1.450401977758188\n=====================================================================\n300 Derivada: [-0.31813180997176005, 0.12013061160212146, -0.5595239277016901, -0.42562594823979083, -0.5637694131446764]\n300 Solução: [0.9255088634228128, 0.8570235658282234, 0.7335713850462502, 0.5375921456759266, 0.2861864680267233]\n300 Tolerância: 0.9631716550247147\n=====================================================================\n301 Derivada: [0.7435979220775266, -1.5089031148657455, 0.5322576404863071, -0.8313431288596362, -0.47884169001240906]\n301 Solução: [0.9267904002549744, 0.8565396412453535, 0.7358253266495407, 0.5393067033287481, 0.28845751180526025]\n301 Tolerância: 2.0083439540132773\n=====================================================================\n302 Derivada: [-0.43344721900064087, 0.30377189767261825, -0.6764719433472806, -0.3745508624071121, -0.5708226247159658]\n302 Solução: [0.9259734591784732, 0.8581973717182285, 0.7352405709409986, 0.5402200441685597, 0.28898358299790083]\n302 Tolerância: 1.0972214749213558\n=====================================================================\n303 Derivada: [0.3191994157247109, -0.8552957970964883, 0.09978221280945831, -0.6655421328746627, -0.5100733636520189]\n303 Solução: [0.9267671247406082, 0.8576411487610643, 0.7364792280638268, 0.5409058672808774, 0.2900287904406181]\n303 Tolerância: 1.2435822548033186\n=====================================================================\n304 Derivada: [-0.6636796375856306, 0.6639400206024106, -0.913147009719097, -0.28064278250469954, -0.5872446226212915]\n304 Solução: [0.9261826531541982, 0.8592072421395369, 0.736296521375333, 0.5421245113230063, 0.290962762664102]\n304 Tolerância: 1.4624435345789784\n=====================================================================\n305 Derivada: [0.10878248864446505, -0.5289411689233532, -0.11485114919699413, -0.5810947415903911, -0.5250902554033487]\n305 Solução: [0.9269117933810379, 0.8584778158473712, 0.7372997346428466, 0.5424328346924573, 0.29160792887547793]\n305 Tolerância: 0.9582244264853167\n=====================================================================\n306 Derivada: [-0.7844412613471263, 0.8566311351799811, -1.0378428540589937, -0.22749322670595262, -0.5952483382031204]\n306 Solução: [0.9265532573466873, 0.8602211522195551, 0.73767827236603, 0.5443480639433045, 0.2933385730278083]\n306 Tolerância: 1.682960646098296\n=====================================================================\n307 Derivada: [0.15853386652452173, -0.6020165664574222, -0.06028709956262901, -0.5965827026513608, -0.5188888565392347]\n307 Solução: [0.9274150702558821, 0.8592800291462372, 0.7388184805797179, 0.5445979954667851, 0.29399253238374434]\n307 Tolerância: 1.008140880056686\n=====================================================================\n308 Derivada: [-0.6941064903674032, 0.7209942340893747, -0.9444474225037425, -0.25793089753346976, -0.5864718403239877]\n308 Solução: [0.927008672404684, 0.8608232845045876, 0.7389730251464678, 0.5461273212426248, 0.2953226918060251]\n308 Tolerância: 1.5179179842236756\n=====================================================================\n309 Derivada: [0.12582014879552617, 
-0.5484102240806124, -0.09188325193048286, -0.5804350111373964, -0.5195271926570015]\n309 Solução: [0.9277712405703709, 0.8600311765813937, 0.7400106260745114, 0.5464106925900126, 0.2959670090134123]\n309 Tolerância: 0.9653187669203013\n=====================================================================\n310 Derivada: [-0.8309780041803947, 0.9388251942581007, -1.0867731297813066, -0.1978907227932467, -0.5959228253295521]\n310 Solução: [0.9273565501385497, 0.8618386809820501, 0.7403134639409815, 0.5483237474362906, 0.2976793178759293]\n310 Tolerância: 1.7740568695327776\n=====================================================================\n311 Derivada: [0.18217181381578484, -0.6322441737325164, -0.030082463183561003, -0.5988913914387552, -0.512676464738]\n311 Solução: [0.928269489840408, 0.8608072568184364, 0.7415074285611026, 0.5485411566776407, 0.298334018245554]\n311 Tolerância: 1.027293665800845\n=====================================================================\n312 Derivada: [-0.4781304304898981, 0.39385168135709137, -0.7188570227168043, -0.3342139004158895, -0.565781970521904]\n312 Solução: [0.9279359232867903, 0.8619649304763939, 0.7415625111963264, 0.549637759567238, 0.29927275688948346]\n312 Tolerância: 1.1542492988849429\n=====================================================================\n313 Derivada: [0.3951675380575921, -0.958906595179883, 0.1944823500393511, -0.6805600111631804, -0.4932047177916843]\n313 Solução: [0.9288114062527753, 0.8612437665090652, 0.7428787777174143, 0.5502497234962223, 0.3003087346187106]\n313 Tolerância: 1.3490350063423877\n=====================================================================\n314 Derivada: [-0.2965806218937814, 0.1146510434214747, -0.5283648665424607, -0.4032581740923007, -0.5495118561530106]\n314 Solução: [0.9283772622290304, 0.8622972527586524, 0.742665113026209, 0.5509974090553615, 0.30085058550495636]\n314 Tolerância: 0.9191598306398897\n=====================================================================\n315 Derivada: [0.7027014496484867, -1.4310033963856768, 0.5205429139307398, -0.7995459467340424, -0.4653454496471241]\n315 Solução: [0.929571983972499, 0.8618354016315883, 0.7447935359505133, 0.5526218621492393, 0.30306419527705714]\n315 Tolerância: 1.9152924271491947\n=====================================================================\n316 Derivada: [-0.4166408338451788, 0.30647705241563017, -0.6529121150212802, -0.34905223081241843, -0.5574218694760944]\n316 Solução: [0.9287999731025238, 0.8634075489176566, 0.7442216504249547, 0.5535002695614226, 0.30357543905718704]\n316 Tolerância: 1.0613051391713366\n=====================================================================\n317 Derivada: [0.32046695717912144, -0.8376737043273579, 0.12293765148618263, -0.6447332615558423, -0.49487385773313264]\n317 Solução: [0.92956286525434, 0.8628463726742197, 0.74541716821369, 0.554139403284834, 0.30459610898420625]\n317 Tolerância: 1.2165899413287156\n=====================================================================\n318 Derivada: [-0.26571233032854025, 0.07444915258270157, -0.4934080312126241, -0.4070092816449602, -0.5432416139421363]\n318 Solução: [0.9292107897398609, 0.8637666684919622, 0.7452821048758755, 0.5548477284012894, 0.3051397936423603]\n318 Tolerância: 0.883382822188448\n=====================================================================\n319 Derivada: [0.7441359035449295, -1.490437526170524, 0.5733471174941656, -0.8111328821325685, -0.45678975877048345]\n319 Solução: [0.930475777640595, 0.8634122352704381, 
0.7476310933057208, 0.5567853946591206, 0.30772602691186024]\n319 Tolerância: 1.9926019712163021\n=====================================================================\n320 Derivada: [-0.4326800079874715, 0.3403444841745795, -0.6680726587411527, -0.3324225660751239, -0.5550486113441195]\n320 Solução: [0.9296582455200012, 0.8650496788416704, 0.7470011953494894, 0.5576765318587291, 0.3082278711292595]\n320 Tolerância: 1.0807178900936993\n=====================================================================\n321 Derivada: [0.3490730927319987, -0.8762463940302609, 0.1600020986987829, -0.6498773712290387, -0.4876373283020783]\n321 Solução: [0.9304505062768142, 0.8644264894785577, 0.748224472922868, 0.5582852157565718, 0.30924419549085147]\n321 Tolerância: 1.2551458880535313\n=====================================================================\n322 Derivada: [-0.2754817496407327, 0.09781782005495643, -0.5010013471765262, -0.39377496239995935, -0.5400334399193341]\n322 Solução: [0.930067003123178, 0.8653891625188742, 0.7480486893671686, 0.5589991923607052, 0.309779929860324]\n322 Tolerância: 0.8849608507851157\n=====================================================================\n323 Derivada: [0.6426739530692771, -1.3292152043126748, 0.4747180538670648, -0.7659145237278153, -0.4601365068148624]\n323 Solução: [0.9311767318509788, 0.8649951210074224, 0.7500668832705897, 0.5605854440402949, 0.3119553575357803]\n323 Tolerância: 1.7898465751449204\n=====================================================================\n324 Derivada: [-0.39206267398287764, 0.284656158897576, -0.6235227972035489, -0.3402455553310233, -0.5478577326291756]\n324 Solução: [0.9304706691583978, 0.8664554404457542, 0.7495453424399252, 0.5614269028676325, 0.31246087860039623]\n324 Tolerância: 1.0195283619819322\n=====================================================================\n325 Derivada: [0.3011196021299156, -0.7963904269163891, 0.11511534395660306, -0.6244500213160364, -0.4872132965340299]\n325 Solução: [0.931188557355388, 0.865934219451679, 0.7506870467805079, 0.5620499110866224, 0.31346403606981]\n325 Tolerância: 1.1685362188337807\n=====================================================================\n326 Derivada: [-0.25484008189403085, 0.07275180336142739, -0.4766833441671565, -0.394020739739517, -0.5344895010969211]\n326 Solução: [0.9308577374800011, 0.8668091601062503, 0.750560577286415, 0.5627359523698066, 0.31399930458406855]\n326 Tolerância: 0.8592989427237716\n=====================================================================\n327 Derivada: [0.7192152712059965, -1.4439451023343963, 0.5648552269207983, -0.7925264020910134, -0.4485189596434864]\n327 Solução: [0.93207096540894, 0.8664628075267709, 0.7528299437930702, 0.5646117835282345, 0.3165438713007165]\n327 Tolerância: 1.9366406087455035\n=====================================================================\n328 Derivada: [-0.4224931360089954, 0.3405254127141575, -0.6543426063850291, -0.31796052809535524, -0.5467610305084563]\n328 Solução: [0.9312808119127419, 0.8680491729956441, 0.7522093753064629, 0.5654824790383443, 0.3170366289468092]\n328 Tolerância: 1.0595606519302998\n=====================================================================\n329 Derivada: [0.3486986240985175, -0.8652241960012361, 0.17244814563071031, -0.6380248863441693, -0.4782892174331508]\n329 Solução: [0.9320544199498678, 0.8674256523424809, 0.7534075124031152, 0.5660646821537533, 0.318037778294664]\n329 Tolerância: 
1.2392652394999875\n=====================================================================\n330 Derivada: [-0.27258902697820986, 0.1081455027678544, -0.49312688613991895, -0.37778017462738944, -0.5320090041774037]\n330 Solução: [0.9316713281997596, 0.8683762160343769, 0.7532180552118706, 0.5667656372290826, 0.318563242522801]\n330 Tolerância: 0.8688646581022993\n=====================================================================\n331 Derivada: [0.6558519164157133, -1.3417302564806164, 0.5053347295525157, -0.7625492688722915, -0.4488576665877986]\n331 Solução: [0.9327694041141006, 0.8679405713088716, 0.7552045282639479, 0.5682874567801939, 0.32070634520076174]\n331 Tolerância: 1.807954534446574\n=====================================================================\n332 Derivada: [-0.39671038890043064, 0.3073499290190398, -0.6253781940343117, -0.32025967450032056, -0.5408078578887228]\n332 Solução: [0.9320488636785853, 0.8694146401941653, 0.7546493509487657, 0.5691252184281249, 0.32119947496141726]\n332 Tolerância: 1.018813228893499\n=====================================================================\n333 Derivada: [0.31993293162918235, -0.8154809839672907, 0.14736174522383294, -0.6205669069720301, -0.4763238470055171]\n333 Solução: [0.9327752620957613, 0.8688518656659322, 0.7557944526224125, 0.569711631406336, 0.3221897237246412]\n333 Tolerância: 1.1836678136890795\n=====================================================================\n334 Derivada: [-0.25960747658484706, 0.09451756459679928, -0.47699487486784165, -0.3753101431527597, -0.5271219058514021]\n334 Solução: [0.9324237732792742, 0.8697477798328884, 0.7556325561738023, 0.5703934065726871, 0.3227130287323377]\n334 Tolerância: 0.8500405459435504\n=====================================================================\n335 Derivada: [0.6131091045068047, -1.2712394729307448, 0.4669570370487577, -0.7405160976210823, -0.44784003124438243]\n335 Solução: [0.9334695553504778, 0.8693670328075351, 0.7575540443171909, 0.5719052760458366, 0.3248364446128426]\n335 Tolerância: 1.7201529671621225\n=====================================================================\n336 Derivada: [-0.3789236880020326, 0.2864911059759834, -0.6047898519100272, -0.31943146614590034, -0.5356806785570285]\n336 Solução: [0.9327959735706242, 0.8707636582050421, 0.7570410299942614, 0.5727188313288676, 0.32532845636591873]\n336 Tolerância: 0.9901622751090825\n=====================================================================\n337 Derivada: [0.3016632559657637, -0.7820974472841726, 0.13321011886836231, -0.607458078410275, -0.4735694680146878]\n337 Solução: [0.9334898035657451, 0.8702390773225177, 0.7581484332875537, 0.573303727812289, 0.32630931698339377]\n337 Tolerância: 1.1461655338033974\n=====================================================================\n338 Derivada: [-0.25079128453381827, 0.08724709557839105, -0.46530847904321604, -0.3713023296521101, -0.5226458121351101]\n338 Solução: [0.9331583864144155, 0.8710983152406766, 0.7580020842800078, 0.5739711011894487, 0.32682959593995287]\n338 Tolerância: 0.8354904964761427\n=====================================================================\n339 Derivada: [0.5873550376259686, -1.2272409539850173, 0.4463593904781078, -0.7255130811328883, -0.4454174074362953]\n339 Solução: [0.9341686540401011, 0.8707468559933516, 0.7598764958777161, 0.5754668259060649, 0.3289349806812198]\n339 Tolerância: 1.6658658116945835\n=====================================================================\n340 Derivada: 
[-0.3679275692956594, 0.276031230853107, -0.5914688781341226, -0.3159805088973826, -0.5311499438328156]\n340 Solução: [0.933523366523178, 0.8720951431742433, 0.7593861108051694, 0.5762638983828955, 0.329424330860288]\n340 Tolerância: 0.9712689201719487\n=====================================================================\n341 Derivada: [0.292308760025378, -0.7627878127502754, 0.12843896150553746, -0.598165250327682, -0.47006932682216984]\n341 Solução: [0.9341970620235973, 0.8715897148950933, 0.7604691226669872, 0.5768424759748707, 0.3303968954547084]\n341 Tolerância: 1.1236338656427058\n=====================================================================\n342 Derivada: [-0.2456210181142069, 0.08548061555001141, -0.4575825837437435, -0.3659426302219373, -0.518501019332021]\n342 Solução: [0.9338759220284523, 0.8724277386151559, 0.7603280154094738, 0.5774996399461779, 0.33091332904130505]\n342 Tolerância: 0.8244851464895913\n=====================================================================\n343 Derivada: [0.5764328688271121, -1.2065374478244735, 0.4415136040211678, -0.716825359669599, -0.4417227678195417]\n343 Solução: [0.9348653621648987, 0.8720833953152107, 0.762171304626215, 0.5789737740767106, 0.3330020172295322]\n343 Tolerância: 1.6407008647765187\n=====================================================================\n344 Derivada: [-0.36298948112676044, 0.2748800535418354, -0.5847322483201083, -0.3101360654985399, -0.5271717736363399]\n344 Solução: [0.9342320741010017, 0.8734089369449007, 0.7616862432936722, 0.5797613019376757, 0.3334873083562871]\n344 Tolerância: 0.9608988851648882\n=====================================================================\n345 Derivada: [0.29097374828512557, -0.7562262831075941, 0.13223273361415977, -0.5924101812263416, -0.4658732348586625]\n345 Solução: [0.9348967276919321, 0.8729056165343627, 0.7627569200178911, 0.5803291780341696, 0.3344525887035216]\n345 Tolerância: 1.1144582105539882\n=====================================================================\n346 Derivada: [-0.24380297146024077, 0.08879629215573459, -0.45355852081016224, -0.3593095216266562, -0.514674099476153]\n346 Solução: [0.9345770543844899, 0.8737364315426597, 0.762611644797856, 0.5809800192977239, 0.3349644123258029]\n346 Tolerância: 0.8167207375817742\n=====================================================================\n347 Derivada: [0.5792357882363035, -1.2075299209379295, 0.45147864492886924, -0.7141553013932622, -0.43680608430668144]\n347 Solução: [0.9355591708466711, 0.873378731635294, 0.7644387238001431, 0.5824274331421673, 0.3370376844550403]\n347 Tolerância: 1.6426481439907754\n=====================================================================\n348 Derivada: [-0.36377862674652306, 0.28258179519474425, -0.5843222383016382, -0.3019628400958112, -0.5237390346189983]\n348 Solução: [0.9349228034035404, 0.8747053636285119, 0.7639427145466812, 0.5832120275894987, 0.33751757395195925]\n348 Tolerância: 0.9586991037907961\n=====================================================================\n349 Derivada: [0.29730208662380164, -0.7619217190587051, 0.14434669973940117, -0.5901346468003226, -0.4609868291463073]\n349 Solução: [0.935588901963257, 0.8741879409078184, 0.7650126405201338, 0.583764938063307, 0.3384765687663251]\n349 Tolerância: 1.118265158363795\n=====================================================================\n350 Derivada: [-0.24525640822702144, 0.09710674035562761, -0.4532044440352365, -0.3513923981410585, -0.5111714115506345]\n350 Solução: 
[0.9352622761356674, 0.8750250131089327, 0.7648540564994241, 0.5844132793500749, 0.33898302402295555]\n350 Tolerância: 0.8122484363482609\n=====================================================================\n351 Derivada: [0.5955440544850603, -1.2299986947409138, 0.47626138509991733, -0.7175756010525021, -0.4306394549225132]\n351 Solução: [0.936250247506699, 0.8746338360542775, 0.7666797091670465, 0.5858288004851646, 0.34104218620327426]\n351 Tolerância: 1.671753420871621\n=====================================================================\n352 Derivada: [-0.3703215047886488, 0.2992403342130956, -0.5903670262339631, -0.29137306961514753, -0.5208765738430685]\n352 Solução: [0.9355959632671524, 0.875985152979652, 0.766156472782049, 0.5866171525859303, 0.34151530083880927]\n352 Tolerância: 0.9651044974865025\n=====================================================================\n353 Derivada: [0.3114188732074581, -0.7801392960582803, 0.165058771327665, -0.5914868284801429, -0.4553716032864372]\n353 Solução: [0.9362740421943777, 0.875437227563002, 0.7672374670927802, 0.5871506726108604, 0.34246905433095354]\n353 Tolerância: 1.1358095787505353\n=====================================================================\n354 Derivada: [-0.2500990723052041, 0.11063603419250967, -0.45670566242054633, -0.3420941684285026, -0.508017327208492]\n354 Solução: [0.9359319072018402, 0.8762943141919723, 0.7670561281106086, 0.5878004994487902, 0.34296934051620476]\n354 Tolerância: 0.8114677842857402\n=====================================================================\n355 Derivada: [0.46648839304577905, -1.0231417070666566, 0.3394772053861743, -0.6573766635987823, -0.4384966993495567]\n355 Solução: [0.9367562083434711, 0.875929669059746, 0.7685613835897779, 0.5889280070839915, 0.34464371403117416]\n355 Tolerância: 1.4156618875095968\n=====================================================================\n356 Derivada: [-0.3153019778524708, 0.21743177136391978, -0.5282470449219545, -0.30908993924875006, -0.5123848371905808]\n356 Solução: [0.9362437088882206, 0.8770537261109667, 0.7681884227928448, 0.5896502226567929, 0.3451254608932525]\n356 Tolerância: 0.8853313870142204\n=====================================================================\n357 Derivada: [0.46259689991353525, -1.0156516237101982, 0.33809646818090755, -0.6540679473791045, -0.4366956136549618]\n357 Solução: [0.9370519781185553, 0.8764963448611324, 0.7695425717117121, 0.5904425674717616, 0.346438947414371]\n357 Tolerância: 1.4065423111749527\n=====================================================================\n358 Derivada: [-0.3134607438385615, 0.21709609937840924, -0.5255835868863699, -0.3066494108375508, -0.5105574673692388]\n358 Solução: [0.9365437539853495, 0.8776121730610092, 0.7691711278379783, 0.5911611479803568, 0.3469187155446071]\n358 Tolerância: 0.8810954827661954\n=====================================================================\n359 Derivada: [0.4612170433895244, -1.0119549781640558, 0.3392111531419175, -0.6516338311775769, -0.43480264451362416]\n359 Solução: [0.9373473032554435, 0.8770556522984425, 0.7705184490445961, 0.5919472365579432, 0.3482275176460175]\n359 Tolerância: 1.401969412994116\n=====================================================================\n360 Derivada: [-0.3126071849718528, 0.2183488660160151, -0.5240235011540335, -0.3037621871543763, -0.5088789628571675]\n360 Solução: [0.9368405950778915, 0.8781674192422263, 0.7701457805413884, 0.59266314286661, 0.34870520609824196]\n360 Tolerância: 
0.878196660396539\n=====================================================================\n361 Derivada: [0.4621544314495054, -1.0119319840920866, 0.3429031464245895, -0.6502727340964327, -0.43270006726469035]\n361 Solução: [0.9376419562698359, 0.8776076870417457, 0.771489102504796, 0.593441830113954, 0.3500097053926756]\n361 Tolerância: 1.4018771713973244\n=====================================================================\n362 Derivada: [-0.31274071296417105, 0.22116631883557147, -0.5235808149124352, -0.3004551832319464, -0.5073109851715429]\n362 Solução: [0.9371342182470033, 0.8787194287234875, 0.7711123778566245, 0.5941562410767064, 0.3504850838845436]\n362 Tolerância: 0.8766389887996365\n=====================================================================\n363 Derivada: [0.24288264372898993, -0.6623294955442702, 0.09955837246357646, -0.5500430516375729, -0.45233867856916277]\n363 Solução: [0.9377068635954484, 0.8783144610986665, 0.772071082962055, 0.5947063909483469, 0.35141399804196216]\n363 Tolerância: 1.007343956950543\n=====================================================================\n364 Derivada: [-0.5254685407210218, 0.5632514183457715, -0.7616031835596004, -0.20106512592161607, -0.5264757165579255]\n364 Solução: [0.937262132192136, 0.8795272226261523, 0.7718887861374757, 0.5957135498563747, 0.3522422548996938]\n364 Tolerância: 1.221070277154914\n=====================================================================\n365 Derivada: [0.1136570296721402, -0.45536928659169007, -0.043730740757780495, -0.4898228388985473, -0.46344124978880075]\n365 Solução: [0.9378394291729086, 0.8789084161362706, 0.7727255083850387, 0.5959344466011617, 0.3528206583968888]\n365 Tolerância: 0.8227367080449131\n=====================================================================\n366 Derivada: [-0.5296835762783303, 0.5729625341858195, -0.7662998949024882, -0.1955886941011329, -0.5254663087989542]\n366 Solução: [0.9375480720411807, 0.8800757446297307, 0.772837611114032, 0.5971900959684553, 0.3540086791788181]\n366 Tolerância: 1.2289979920366783\n=====================================================================\n367 Derivada: [0.1170319392298893, -0.45869775571782156, -0.038266554353242554, -0.4889652798194959, -0.4613463584079085]\n367 Solução: [0.9381299997983225, 0.879446269189341, 0.7736794933227872, 0.5974049761255488, 0.3545859737075279]\n367 Tolerância: 0.8230989869801449\n=====================================================================\n368 Derivada: [-0.5358740003225648, 0.5857263524433165, -0.7732979487568628, -0.18916766299587096, -0.5246576250947612]\n368 Solução: [0.9378299911650427, 0.8806221301353949, 0.7737775887379994, 0.5986584271602423, 0.35576862428450134]\n368 Tolerância: 1.2406756425485375\n=====================================================================\n369 Derivada: [0.12122493248591582, -0.4634506935233276, -0.03186380837911429, -0.4885231346653711, -0.4591517139671879]\n369 Solução: [0.9384187199251628, 0.8799786319454548, 0.7746271592383427, 0.5988662529618736, 0.3563450303667588]\n369 Tolerância: 0.8246044393587605\n=====================================================================\n370 Derivada: [-0.5441042273804442, 0.6017202247731461, -0.7826453046079678, -0.18182375749806567, -0.5240487432453733]\n370 Solução: [0.9381079626519445, 0.8811666769361762, 0.7747088413643145, 0.6001185705678272, 0.3575220550241454]\n370 Tolerância: 1.256318938831611\n=====================================================================\n371 Derivada: [0.12629862183712248, 
-0.4696663596976123, -0.024461135088273522, -0.48853000607374497, -0.4568605801303818]\n371 Solução: [0.9387057334095646, 0.8805056073532955, 0.7755686811765059, 0.6003183281139066, 0.35809779216882415]\n371 Tolerância: 0.8273568365287449\n=====================================================================\n372 Derivada: [-0.5544656337038418, 0.6211130322788563, -0.7944610120617881, -0.17349395870941464, -0.5236638852915263]\n372 Solução: [0.9383819698526091, 0.8817095860585751, 0.7756313867229968, 0.6015706633345547, 0.3592689435583185]\n372 Tolerância: 1.2762076069271644\n=====================================================================\n373 Derivada: [0.13230612840840195, -0.47742288457845916, -0.01598914117238337, -0.48901134715703165, -0.45447351464653707]\n373 Solução: [0.9389911239911998, 0.8810272109010422, 0.7765042076590999, 0.6017612694903633, 0.359844257885421]\n373 Tolerância: 0.8314874912664524\n=====================================================================\n374 Derivada: [-0.567080440300515, 0.6441164604784149, -0.8089004741339068, -0.16410872949502675, -0.5235211009024283]\n374 Solução: [0.9386519603319498, 0.8822510732760602, 0.7765451954477498, 0.603014838617597, 0.3610092900884944]\n374 Tolerância: 1.3006891387534874\n=====================================================================\n375 Derivada: [0.13931149017741973, -0.48682178197242365, -0.006367956939925534, -0.49000096131570103, -0.4519843911039487]\n375 Solução: [0.9392749735109909, 0.8815434257975073, 0.7774338800506801, 0.6031951338526379, 0.36158444754798197]\n375 Tolerância: 0.8371585999473257\n=====================================================================\n376 Derivada: [-0.582102464394211, 0.6709889640300162, -0.8261557335086138, -0.15358326994848426, -0.5236404787884368]\n376 Solução: [0.9389178517710342, 0.8827913820257082, 0.7774502041590466, 0.6044512398325732, 0.3627430989411928]\n376 Tolerância: 1.330184173995051\n=====================================================================\n377 Derivada: [0.147391224296598, -0.49798825140862846, 0.00449751315403546, -0.49153939387814205, -0.44938446518911235]\n377 Solução: [0.9395573686386547, 0.8820542115329995, 0.7783578459561142, 0.6046199714523897, 0.36331838755314294]\n377 Tolerância: 0.8445673877510544\n=====================================================================\n378 Derivada: [-0.3863309820229688, 0.3588286596806256, -0.6032627736157679, -0.2418541596362047, -0.5026487497261769]\n378 Solução: [0.93928748724651, 0.8829660552550611, 0.7783496107635715, 0.6055200069636412, 0.36414123508461715]\n378 Tolerância: 0.9762606254705954\n=====================================================================\n379 Derivada: [0.3617765515060114, -0.8392863814134444, 0.250403347124319, -0.5891513132858393, -0.42587387904947605]\n379 Solução: [0.9399948804020697, 0.8823090203557435, 0.7794542178929948, 0.6059628551563344, 0.3650616124339693]\n379 Tolerância: 1.1943423170514367\n=====================================================================\n380 Derivada: [-0.2665460080601747, 0.1683530342484687, -0.46630321657437435, -0.29518547266606276, -0.4892683056827849]\n380 Solução: [0.9395974208117921, 0.8832310879134487, 0.7791791165594842, 0.6066101161206378, 0.36552949145147967]\n380 Tolerância: 0.8020886151541353\n=====================================================================\n381 Derivada: [0.38014602205538495, -0.867120316687533, 0.27354866660974153, -0.5959715831795904, -0.42214626058147076]\n381 Solução: 
[0.9402807052562823, 0.882799518855927, 0.780374473926191, 0.6073668171614077, 0.36678371928587156]\n381 Tolerância: 1.2266324257825956\n=====================================================================\n382 Derivada: [-0.2742557479134007, 0.18310723760390601, -0.4745649662291953, -0.2886218774825835, -0.48854558021349703]\n382 Solução: [0.9398630643629109, 0.8837521656882253, 0.7800739443852379, 0.6080215710980064, 0.36724750301942055]\n382 Tolerância: 0.8098986078555163\n=====================================================================\n383 Derivada: [0.40101603209689074, -0.8991603064039282, 0.299689523180831, -0.6040751160211073, -0.41812412013547373]\n383 Solução: [0.9405661125448178, 0.8832827745762036, 0.7812904805535501, 0.6087614465163577, 0.3684998781640108]\n383 Tolerância: 1.2644569785289148\n=====================================================================\n384 Derivada: [-0.2831745713581313, 0.19972255670683126, -0.4842480441494956, -0.2814892130727884, -0.4879426882825584]\n384 Solução: [0.9401255431736176, 0.8842706215925166, 0.7809612318098211, 0.6094251032600332, 0.3689592430420893]\n384 Tolerância: 0.8196934487408901\n=====================================================================\n385 Derivada: [0.4246953914050664, -0.9357989363647334, 0.3291398137676822, -0.6136093588998222, -0.41378316589562303]\n385 Solução: [0.9408514545503902, 0.883758637499396, 0.7822025903214348, 0.6101466942603418, 0.3702100726873449]\n385 Tolerância: 1.3084921839648676\n=====================================================================\n386 Derivada: [-0.2934018401318781, 0.21842463712084736, -0.4954951922786677, -0.27372991685481907, -0.4874825160184315]\n386 Solução: [0.940384870258075, 0.8847867369167889, 0.7818409865221295, 0.6108208256360863, 0.370664668450658]\n386 Tolerância: 0.83179127085491\n=====================================================================\n387 Derivada: [0.2384982325186229, -0.6359291601651762, 0.116975767248789, -0.5243803200245054, -0.4314722190212308]\n387 Solução: [0.9409221050727696, 0.8843867894611233, 0.7827482653165851, 0.6113220400834523, 0.37155727559668783]\n387 Tolerância: 0.9675296530868932\n=====================================================================\n388 Derivada: [-0.2105071412223083, 0.08680350024440031, -0.3996272382131849, -0.31081807079085877, -0.4776060896600427]\n388 Solução: [0.9406600830888014, 0.8850854421029063, 0.7826197519004182, 0.6118981415092605, 0.37203130513418675]\n388 Tolerância: 0.7323014978085693\n=====================================================================\n389 Derivada: [0.5238751790487868, -1.0918667166452565, 0.4481894227308203, -0.6568496138394124, -0.3995864499719062]\n389 Solução: [0.9415080732817136, 0.8847357697996757, 0.7842295784215406, 0.6131502162573194, 0.373955255446538]\n389 Tolerância: 1.5028681362617988\n=====================================================================\n390 Derivada: [-0.33692448158205934, 0.29361712810799645, -0.5447136247138076, -0.24646137066591223, -0.48887948464775377]\n390 Solução: [0.9409325268203563, 0.8859353304014589, 0.7837371828155131, 0.6138718527959612, 0.37439425423190753]\n390 Tolerância: 0.8922939061186932\n=====================================================================\n391 Derivada: [0.3040366282964442, -0.7380120616583667, 0.19677215166481687, -0.5515086977365655, -0.42069948073903163]\n391 Solução: [0.9415494539716907, 0.8853977013827065, 0.7847345832513904, 0.6143231370440068, 0.37528941930389437]\n391 Tolerância: 
1.0756250512809182\n=====================================================================\n392 Derivada: [-0.23880566728763597, 0.13710859813801335, -0.43090403145399137, -0.29124200918347753, -0.47722277401041424]\n392 Solução: [0.9412154293556423, 0.8862085056496651, 0.7845184029089852, 0.6149290425957193, 0.375751613557636]\n392 Tolerância: 0.7576732030914056\n=====================================================================\n393 Derivada: [0.4914521464099266, -1.0376678262251744, 0.4158441650617135, -0.63892900267291, -0.3989406576377519]\n393 Solução: [0.9420025085813217, 0.8857566096352942, 0.7859386188329669, 0.6158889466787215, 0.3773244913528363]\n393 Tolerância: 1.4347787349214225\n=====================================================================\n394 Derivada: [-0.3222497058218323, 0.2743651603142325, -0.5270550411230772, -0.2477153088696582, -0.48431024079324914]\n394 Solução: [0.9414625831275022, 0.8868966255576608, 0.7854817587883435, 0.6165908950459159, 0.3777627806495575]\n394 Tolerância: 0.8676562764455552\n=====================================================================\n395 Derivada: [0.2869000514887148, -0.7076467929223327, 0.18074574991871373, -0.5398260023860502, -0.41886106818961366]\n395 Solução: [0.9420526399618927, 0.8863942479447807, 0.7864468253919624, 0.6170444753233872, 0.3786495791861662]\n395 Tolerância: 1.0404813100138879\n=====================================================================\n396 Derivada: [-0.23039321962869508, 0.12769375950682615, -0.4199880452756588, -0.28985140553162125, -0.47327687687671016]\n396 Solução: [0.9417374421514193, 0.8871716919311456, 0.7862482521803819, 0.6176375458826493, 0.3791097536995581]\n396 Tolerância: 0.7441649881022092\n=====================================================================\n397 Derivada: [0.4691085972403357, -0.9994226228695311, 0.39465427809406606, -0.6254454019181708, -0.39750169708294436]\n397 Solução: [0.9424967948040042, 0.8867508262687086, 0.7876324901225746, 0.6185928666772481, 0.38066962621856093]\n397 Tolerância: 1.3870292009509244\n=====================================================================\n398 Derivada: [-0.3119072568862862, 0.26201703924192543, -0.5143071939679658, -0.24701242434139203, -0.480277130881845]\n398 Solução: [0.9419814167064501, 0.8878488247557479, 0.787198909983067, 0.6192800015182227, 0.3811063346260007]\n398 Tolerância: 0.8497836287819058\n=====================================================================\n399 Derivada: [0.2757839742471333, -0.6869302560357937, 0.17151675300362967, -0.5310039356022145, -0.4164737740268123]\n399 Solução: [0.942552535951237, 0.8873690572278391, 0.7881406345813969, 0.6197322947756838, 0.38198574831780097]\n399 Tolerância: 1.016249751659025\n=====================================================================\n400 Derivada: [-0.22462307866487663, 0.12246575653017544, -0.41211725549874245, -0.2873095267030976, -0.46964685008555307]\n400 Solução: [0.9422495506279674, 0.8881237413470191, 0.7879522006486537, 0.620315673122903, 0.3824433000714925]\n400 Tolerância: 0.7337635512894132\n=====================================================================\n401 Derivada: [0.45547681216714864, -0.9751804008452609, 0.38334927944094943, -0.6161103745998844, -0.3952059652062445]\n401 Solução: [0.9429898854819655, 0.8877201066514241, 0.7893104972671188, 0.6212626161430427, 0.3839912083908663]\n401 Tolerância: 1.3568980651790967\n=====================================================================\n402 Derivada: 
[-0.30542084764249466, 0.25577485230297725, -0.506013158995529, -0.24458164744717692, -0.4766682156931239]\n402 Solução: [0.9424894837107858, 0.8887914718378996, 0.7888893371700767, 0.6219394952166998, 0.38442539463193764]\n402 Tolerância: 0.8377259435172972\n=====================================================================\n403 Derivada: [0.2700184304198956, -0.6748855606147117, 0.1683859827375329, -0.5247926034996766, -0.41355955762568897]\n403 Solução: [0.9430487259855218, 0.8883231340956457, 0.7898158749367922, 0.6223873375887344, 0.3852982002026648]\n403 Tolerância: 1.0015852915586703\n=====================================================================\n404 Derivada: [-0.22125717945658607, 0.12101471729363311, -0.4070519773081287, -0.2837130792133138, -0.4662920773429846]\n404 Solução: [0.9427520748778828, 0.8890645855172195, 0.7896308805709916, 0.6229638919626965, 0.38575255030259537]\n404 Tolerância: 0.7260936759142963\n=====================================================================\n405 Derivada: [0.4497531897917497, -0.9637239254593055, 0.38112241933973223, -0.6106284522920902, -0.39210748631407455]\n405 Solução: [0.9434813160699395, 0.8886657332995769, 0.7909724825469828, 0.623898981457174, 0.38728940163172876]\n405 Tolerância: 1.3427253711485805\n=====================================================================\n406 Derivada: [-0.30249027297656994, 0.25517632222977227, -0.5018764406725325, -0.2405277299761508, -0.473459729217538]\n406 Solução: [0.9429872024581077, 0.8897245120262778, 0.7905537689515169, 0.6245698379111082, 0.3877201837822203]\n406 Tolerância: 0.8309711104800858\n=====================================================================\n407 Derivada: [0.26921514345366404, -0.6709278631100801, 0.17097896773481125, -0.5210599195795709, -0.4101414679319788]\n407 Solução: [0.9435410786903646, 0.88925727022532, 0.7914727321607561, 0.6250102573385548, 0.38858711443874655]\n407 Tolerância: 0.9957782192233189\n=====================================================================\n408 Derivada: [-0.2201559259805208, 0.12313281218615657, -0.40466274790961165, -0.27910300316486314, -0.4632037636242501]\n408 Solução: [0.9432453101001446, 0.8899943735905532, 0.7912848890565553, 0.6255827108634835, 0.38903770931318354]\n408 Tolerância: 0.7209982480826743\n=====================================================================\n409 Derivada: [0.4515153715314, -0.9644300854132553, 0.3875967281438193, -0.6088777186421197, -0.38822669405523413]\n409 Solução: [0.9439709216725901, 0.8895885403472639, 0.792618616375105, 0.6265026060155161, 0.3905643818739568]\n409 Tolerância: 1.3437539902389273\n=====================================================================\n410 Derivada: [-0.30297576938119164, 0.26002452916441143, -0.5017840147986306, -0.23487858601689027, -0.4706477886316236]\n410 Solução: [0.9434748720700776, 0.8906480948844611, 0.7921927898915485, 0.6271715390560165, 0.3909909004587343]\n410 Tolerância: 0.8293802271695788\n=====================================================================\n411 Derivada: [0.2732159268246619, -0.6748363677842235, 0.1791843544368703, -0.5197809989587086, -0.4062208680270629]\n411 Solução: [0.944029637272802, 0.8901719757514696, 0.7931115838639581, 0.6276016145919361, 0.39185268229826975]\n411 Tolerância: 0.9986720078829657\n=====================================================================\n412 Derivada: [-0.22127921321691701, 0.12877505424188485, -0.4049346407170731, -0.27347214554325205, -0.4603849467444405]\n412 Solução: 
[0.9437294732906948, 0.8909133731281856, 0.7929147260526871, 0.6281726630527061, 0.3922989698730065]\n412 Tolerância: 0.7185129655242654\n=====================================================================\n413 Derivada: [0.46067664501578065, -0.9772131143789125, 0.40278243768457855, -0.610896559995453, -0.3835489818856246]\n413 Solução: [0.9444587871037876, 0.8904889436281208, 0.7942493495023161, 0.629073999469902, 0.39381635189963005]\n413 Tolerância: 1.360039176937208\n=====================================================================\n414 Derivada: [-0.3068864111814946, 0.27036290693672527, -0.5057959761862207, -0.22758972097483365, -0.4682446302804806]\n414 Solução: [0.9439526726256209, 0.8915625420203828, 0.7938068394999771, 0.6297451504757563, 0.39423773139633056]\n414 Tolerância: 0.8331596817064616\n=====================================================================\n415 Derivada: [0.2820734154142315, -0.6867304788871706, 0.1931369627432673, -0.5210338019096241, -0.40177634140293605]\n415 Solução: [0.9445145984273449, 0.8910674927523101, 0.7947329795930915, 0.6301618797011741, 0.39509511292150234]\n415 Tolerância: 1.0106267856816742\n=====================================================================\n416 Derivada: [-0.22468200264950156, 0.13805013736327965, -0.4079644154577977, -0.2667663606327295, -0.45784906409664927]\n416 Solução: [0.9442047033176368, 0.8918219573897594, 0.7945207929885152, 0.6307343045323737, 0.3955365175934538]\n416 Tolerância: 0.7188635918446856\n=====================================================================\n417 Derivada: [0.32130569906951223, -0.7491093441029761, 0.24129494704808963, -0.5390256846506247, -0.3957364134511039]\n417 Solução: [0.9447806703654444, 0.8914680690981788, 0.7955666002058752, 0.6314181538455191, 0.3967102029384281]\n417 Tolerância: 1.0815643350222388\n=====================================================================\n418 Derivada: [-0.24239552763157235, 0.16881208228602418, -0.4285688288295262, -0.25535377626927414, -0.4584208857177572]\n418 Solução: [0.9444276733816034, 0.8922910652037607, 0.7953015056595577, 0.6320103451494566, 0.39714497194734655]\n418 Tolerância: 0.7391073071639023\n=====================================================================\n419 Derivada: [0.366170312880854, -0.8209331048402646, 0.296521323054435, -0.5601533462897947, -0.38896027044488335]\n419 Solução: [0.9450490486355417, 0.8918583193873536, 0.7964001318076803, 0.6326649385700688, 0.3983201231436445]\n419 Tolerância: 1.1666184547777836\n=====================================================================\n420 Derivada: [-0.2628772187292725, 0.2040463366610652, -0.45255842036183935, -0.24250730289955413, -0.4593086329749809]\n420 Solução: [0.9446467619148474, 0.8927602234331986, 0.7960743637525668, 0.6332803414163344, 0.39874744765951414]\n420 Tolerância: 0.7650638377004912\n=====================================================================\n421 Derivada: [0.22314183637507767, -0.587362727529694, 0.12760658821787274, -0.4869844559873542, -0.40362599117418085]\n421 Solução: [0.9451281044784386, 0.8923866034319882, 0.796903022969538, 0.6337243855500616, 0.3995884668849322]\n421 Tolerância: 0.9006322753006404\n=====================================================================\n422 Derivada: [-0.1964465424833861, 0.09714471981988027, -0.37292955220203794, -0.2741857313682061, -0.4506172420804262]\n422 Solução: [0.9448829535351555, 0.8930318993972918, 0.7967628301846307, 0.6342594026525867, 0.400031903642814]\n422 Tolerância: 
0.6821572441118396\n=====================================================================\n423 Derivada: [0.5190184604576302, -1.0671432944629373, 0.4830699669103353, -0.6342574996132697, -0.3680329330639722]\n423 Solução: [0.9456743031325617, 0.8926405693491892, 0.7982651098748976, 0.6353639106036549, 0.4018471342322493]\n423 Tolerância: 1.4762324958417354\n=====================================================================\n424 Derivada: [-0.3332985278157139, 0.3231839781137751, -0.5360469708451774, -0.2010482503807367, -0.4643561534027185]\n424 Solução: [0.9451040924216098, 0.8938129679881256, 0.7977343933585166, 0.6360607267043042, 0.402251467288594]\n424 Tolerância: 0.8711654168353918\n=====================================================================\n425 Derivada: [0.0634377082118931, -0.3244205680504706, -0.06047206819306439, -0.4025304053837999, -0.41853131005022703]\n425 Solução: [0.945470265120626, 0.8934579074652955, 0.7983233121497283, 0.6362816049090683, 0.4027616241954085]\n425 Tolerância: 0.6709166144758375\n=====================================================================\n426 Derivada: [-0.6139049438888264, 0.7854004792393425, -0.8722575072773395, -0.054429626073783766, -0.4945592385014095]\n426 Solução: [0.9452147177120577, 0.894764777429366, 0.7985669130103693, 0.6379031263174747, 0.40444760237310107]\n426 Tolerância: 1.4149621113315494\n=====================================================================\n427 Derivada: [0.19613463703137768, -0.5391946626325534, 0.10056393071442926, -0.4677614584578862, -0.4011503288841993]\n427 Solução: [0.9458891738271699, 0.8939019106919204, 0.7995252037288136, 0.6379629244906515, 0.4049909413802437]\n427 Tolerância: 0.8479594050413911\n=====================================================================\n428 Derivada: [-0.43501463933438345, 0.493995785536697, -0.6579264461825574, -0.14343690991080393, -0.4729566924035993]\n428 Solução: [0.9455300405806525, 0.8948892056064087, 0.7993410656720855, 0.6388194213017927, 0.4057254695703393]\n428 Tolerância: 1.053754696864391\n=====================================================================\n429 Derivada: [0.11322226038305416, -0.40272138195575735, 0.0018750053307883263, -0.42401247694324695, -0.4093077252979356]\n429 Solução: [0.946007961937343, 0.8943464856271814, 0.8000638852540732, 0.6389770057975444, 0.40624507531150533]\n429 Tolerância: 0.7227217320433305\n=====================================================================\n430 Derivada: [-0.48565921362802555, 0.5790038594887505, -0.7189384266239358, -0.11471722864401102, -0.47750727570168294]\n430 Solução: [0.9457177193264978, 0.8953788524510427, 0.8000590787218532, 0.6400639518443959, 0.40729432607215676]\n430 Tolerância: 1.152890299749706\n=====================================================================\n431 Derivada: [0.13805324922401496, -0.442034484286296, 0.03305326823007704, -0.4350537386981159, -0.4048568648121602]\n431 Solução: [0.9462512804742825, 0.8947427398124441, 0.8008489280675094, 0.6401899839559433, 0.40781893123345014]\n431 Tolerância: 0.7541396024616327\n=====================================================================\n432 Derivada: [-0.3497896244934964, 0.3577945195793859, -0.5552248918440625, -0.18250135229720854, -0.46071274644691584]\n432 Solução: [0.9459984974251663, 0.8955521291269332, 0.8007884057257795, 0.6409865911435009, 0.408560246293531]\n432 Tolerância: 0.8967760916173924\n=====================================================================\n433 Derivada: 
[0.07497707763377548, -0.3377021042376782, -0.04214503270648606, -0.4011599130174659, -0.41089758930081643]\n433 Solução: [0.9463827877841069, 0.8951590443275905, 0.8013983940102761, 0.6411870931174602, 0.40906640043391457]\n433 Tolerância: 0.6717191242359346\n=====================================================================\n434 Derivada: [-0.5237892910316191, 0.6456644237302953, -0.7653358103192431, -0.08937063175535798, -0.4794983760403255]\n434 Solução: [0.9461356709510854, 0.8962720761652879, 0.8015372997577218, 0.6425092754479621, 0.41042067715646363]\n434 Tolerância: 1.2308056201013442\n=====================================================================\n435 Derivada: [0.15820692160605176, -0.47249402107922833, 0.06002647904116998, -0.44213951781438254, -0.3993758142029975]\n435 Solução: [0.9467111230530488, 0.8955627280435139, 0.8023781227915198, 0.6426074609564824, 0.410947469805922]\n435 Tolerância: 0.7790196418378702\n=====================================================================\n436 Derivada: [-0.3775887311924748, 0.40715843179350486, -0.588700079306733, -0.16293505070136405, -0.46134838733308925]\n436 Solução: [0.9464214375276472, 0.8964278904356268, 0.8022682110256973, 0.6434170425931054, 0.41167874876259253]\n436 Tolerância: 0.9456792199904474\n=====================================================================\n437 Derivada: [0.08998853846043176, -0.3596573817475246, -0.021705664132639413, -0.4054199618713312, -0.406054451861678]\n437 Solução: [0.9468362688973655, 0.8959805728225724, 0.8029149762495451, 0.6435960483861123, 0.4121856012389106]\n437 Tolerância: 0.6834969411164966\n=====================================================================\n438 Derivada: [-0.4250042320018679, 0.4869553609061086, -0.6461464268516863, -0.13568944386088333, -0.4656400372746816]\n438 Solução: [0.9466055853881286, 0.8969025460912122, 0.8029706182108226, 0.6446353329563391, 0.4132265123093568]\n438 Tolerância: 1.0346456374799733\n=====================================================================\n439 Derivada: [0.11340227366019917, -0.39677023384172116, 0.007883880129412546, -0.4159077813855987, -0.4017700541170228]\n439 Solução: [0.9470725089828572, 0.896367560953498, 0.8036804958770415, 0.6447844058316745, 0.4137380797331205]\n439 Tolerância: 0.7104557049544314\n=====================================================================\n440 Derivada: [-0.4809401125272643, 0.5808590461168706, -0.7142746763896923, -0.10377422735579955, -0.4709916515310226]\n440 Solução: [0.9467818049121951, 0.8973846721486489, 0.8036602857351082, 0.6458505756814179, 0.41476800785036383]\n440 Tolerância: 1.1452035464057695\n=====================================================================\n441 Derivada: [0.14094322790253955, -0.44078799137912483, 0.042603909281496044, -0.4286671307932721, -0.39695818518140413]\n441 Solução: [0.947310181500665, 0.8967465213411475, 0.8044450113317277, 0.6459645854526829, 0.4152854547331494]\n441 Tolerância: 0.7465289342863723\n=====================================================================\n442 Derivada: [-0.3505895690196894, 0.3678704887263393, -0.5557525664098364, -0.16993293118915176, -0.4545213634351626]\n442 Solução: [0.9470521067425427, 0.897553628258956, 0.804367001243932, 0.6467494984118991, 0.41601230687886726]\n442 Tolerância: 0.8958624494718946\n=====================================================================\n443 Derivada: [0.07978330201501649, -0.33931103427426024, -0.03100838826074437, -0.3952934421694607, -0.40295559054050045]\n443 
Solução: [0.947437275946788, 0.8971494736692909, 0.804977569249021, 0.6469361923060278, 0.41651165896271936]\n443 Tolerância: 0.6641453054665378\n=====================================================================\n444 Derivada: [-0.5332509952245346, 0.6709115023444383, -0.7784647432124814, -0.07080941589561007, -0.4748184932446833]\n444 Solução: [0.947174318286338, 0.898267808376982, 0.805079769747439, 0.6482390393444282, 0.4178397596639637]\n444 Tolerância: 1.2533765592261294\n=====================================================================\n445 Derivada: [0.16794754329299622, -0.4827935896944382, 0.07809047940605751, -0.43970577388267884, -0.39066099414066]\n445 Solução: [0.94776016532699, 0.8975307229862227, 0.8059350166577066, 0.648316832892165, 0.4183614108406241]\n445 Tolerância: 0.7831674983365925\n=====================================================================\n446 Derivada: [-0.38866026474369164, 0.4341440487744421, -0.6021791727175128, -0.14485295475850535, -0.45651682918702363]\n446 Solução: [0.9474526441905893, 0.8984147444517276, 0.8057920287193411, 0.6491219582105537, 0.4190767324851688]\n446 Tolerância: 0.9651671390826013\n=====================================================================\n447 Derivada: [0.09975461105108252, -0.36968224904279623, -0.004398554625112183, -0.4024786398839524, -0.3975335823681547]\n447 Solução: [0.9478796391103517, 0.8979377795543925, 0.8064536025174927, 0.649281098419639, 0.4195782768531721]\n447 Tolerância: 0.6831231950676082\n=====================================================================\n448 Derivada: [-0.4437576519199524, 0.526823590172512, -0.6696352752889823, -0.11317993859447029, -0.4618897740410688]\n448 Solução: [0.9476239205029209, 0.898885451335386, 0.806464878109183, 0.6503128429798883, 0.42059734487437955]\n448 Tolerância: 1.0719268690424142\n=====================================================================\n449 Derivada: [0.12705037171519962, -0.4134008919564849, 0.03019020427933583, -0.41532158215856896, -0.3927482044479831]\n449 Solução: [0.9481114472201181, 0.8983066656528235, 0.8072005613950229, 0.6504371861741449, 0.4211047921358993]\n449 Tolerância: 0.7174236625325564\n=====================================================================\n450 Derivada: [-0.32770855080207184, 0.3368094612327468, -0.5274469680574612, -0.17274307798756183, -0.4468920668921328]\n450 Solução: [0.9478788110414403, 0.8990636252938571, 0.8071452814799607, 0.6511976627039763, 0.421823935576661]\n450 Tolerância: 0.8535727865867299\n=====================================================================\n451 Derivada: [0.07220791801972837, -0.3220817378781362, -0.03626154843573204, -0.38490552455790805, -0.398139245698232]\n451 Solução: [0.9482388424082884, 0.8986935953681864, 0.8077247520259222, 0.6513874439175856, 0.42231490586499465]\n451 Tolerância: 0.6457032220723826\n=====================================================================\n452 Derivada: [-0.5019796061295665, 0.6266669089468451, -0.741441434954254, -0.07694470919248886, -0.4665583403955651]\n452 Solução: [0.948000852444112, 0.8997551440648062, 0.8078442664067529, 0.6526560534345611, 0.42362713238279887]\n452 Tolerância: 1.1908096046267815\n=====================================================================\n453 Derivada: [0.15696307685453803, -0.4603846139621055, 0.06935680989536763, -0.4281612368956331, -0.3861131553526178]\n453 Solução: [0.9485523437106118, 0.8990666672361292, 0.8086588382957407, 0.6527405874168283, 0.42413970868450296]\n453 Tolerância: 
0.7575006144313653\n=====================================================================\n454 Derivada: [-0.37022399660344263, 0.41042305027332304, -0.5796331558978807, -0.145222043438892, -0.4495331816822983]\n454 Solução: [0.9482649357329729, 0.8999096566415774, 0.8085318421838718, 0.6535245740566519, 0.4248467029875168]\n454 Tolerância: 0.9298693492135328\n=====================================================================\n455 Derivada: [0.09436210587205096, -0.3562264989024584, -0.0068905318602503485, -0.39348366667495327, -0.392470706689906]\n455 Solução: [0.9486716759636163, 0.8994587524115408, 0.8091686461881541, 0.6536841197586721, 0.4253405748912205]\n455 Tolerância: 0.6668670456124433\n=====================================================================\n456 Derivada: [-0.42669967167296363, 0.5055301308826756, -0.6492615591550077, -0.11251619132913504, -0.4552011537545013]\n456 Solução: [0.9484297809168252, 0.9003719306924187, 0.809186309905081, 0.6546928059159199, 0.4263466643492879]\n456 Tolerância: 1.0387694060392352\n=====================================================================\n457 Derivada: [0.1224781296239712, -0.40147086439117174, 0.02890602742186843, -0.4070026293668434, -0.3875560983735795]\n457 Solução: [0.9488985671772081, 0.8998165387029236, 0.8098996099578636, 0.6548164198956516, 0.42684676327309046]\n457 Tolerância: 0.7020442830125251\n=====================================================================\n458 Derivada: [-0.31846363893259877, 0.3278763434797156, -0.5156897240706826, -0.1687478052305238, -0.4409391161556755]\n458 Solução: [0.9486743030238439, 0.9005516538110617, 0.8098466814408558, 0.6555616639679787, 0.42755639968368664]\n458 Tolerância: 0.8353208984421405\n=====================================================================\n459 Derivada: [0.07094219461413331, -0.3154122515050517, -0.03397446580817132, -0.3780270407212072, -0.39267427657144083]\n459 Solução: [0.9490241776271634, 0.9001914381016723, 0.810413235092789, 0.6557470558438423, 0.42804082986501]\n459 Tolerância: 0.6346412204548675\n=====================================================================\n460 Derivada: [-0.4925083031893678, 0.6180275192871818, -0.730948764273819, -0.07195680574969288, -0.4609501046471536]\n460 Solução: [0.9487903593587819, 0.9012310048485762, 0.810525211481561, 0.656992994576688, 0.4293350443996083]\n460 Tolerância: 1.173226883500409\n=====================================================================\n461 Derivada: [0.15618734667918943, -0.4549581389301238, 0.07295015759157764, -0.4221962178570209, -0.3804435706071132]\n461 Solução: [0.9493314451410944, 0.9005520195368594, 0.8113282557782485, 0.6570720486845674, 0.42984145930949896]\n461 Tolerância: 0.7481243731647471\n=====================================================================\n462 Derivada: [-0.36680370700435105, 0.4111820062332754, -0.5754834366891259, -0.13792451620730617, -0.44442409375352554]\n462 Solução: [0.9490454575678292, 0.9013850727697637, 0.8111946800502366, 0.6578451130483193, 0.43053807229278834]\n462 Tolerância: 0.9226769396193579\n=====================================================================\n463 Derivada: [0.09600457504194537, -0.3545675242994548, -0.0008706511324589883, -0.38844113848307416, -0.3866497942605065]\n463 Solução: [0.9494484401560909, 0.9009333347258062, 0.8118269250368336, 0.6579966414474728, 0.43102633118485156]\n463 Tolerância: 0.6597881078179132\n=====================================================================\n464 Derivada: 
[-0.4270533111791224, 0.5127303223198965, -0.6503145613887682, -0.10283891317945404, -0.45069719429847055]\n464 Solução: [0.949202334678078, 0.9018422602641715, 0.8118291569306058, 0.658992401201885, 0.4320174988703338]\n464 Tolerância: 1.0401344844692093\n=====================================================================\n465 Derivada: [0.1261314012110688, -0.4033157793546138, 0.03764339639849368, -0.4033041324074276, -0.38145136773630384]\n465 Solução: [0.9496715094584262, 0.9012789579081071, 0.812543613846194, 0.6591053834063058, 0.43251264959649177]\n465 Tolerância: 0.6986767082886806\n=====================================================================\n466 Derivada: [-0.32142017995772676, 0.33887437695079825, -0.5190525604102163, -0.15844021917551743, -0.4365611006517156]\n466 Solução: [0.9494405559649977, 0.9020174511564371, 0.8124746867287651, 0.6598438553284385, 0.43321110791143863]\n466 Tolerância: 0.8386007593371674\n=====================================================================\n467 Derivada: [0.07544336377418404, -0.31848216588701916, -0.024649624932692404, -0.37448532630182285, -0.38658607133100986]\n467 Solução: [0.9497936787212988, 0.9016451526466034, 0.8130449349030439, 0.6600179229520444, 0.43369072826127575]\n467 Tolerância: 0.6304111716153993\n=====================================================================\n468 Derivada: [-0.5031687131904619, 0.6425181799598363, -0.7454781802115917, -0.056298078564537946, -0.45791603550939897]\n468 Solução: [0.9495450250565157, 0.9026948375195221, 0.8131261775633445, 0.6612521885538692, 0.4349648766897349]\n468 Tolerância: 1.197748993700657\n=====================================================================\n469 Derivada: [0.1650550856630275, -0.46570002060258275, 0.08844018312112212, -0.4217033054896433, -0.37366010575166797]\n469 Solução: [0.9500978227150502, 0.9019889459643904, 0.8139451843531277, 0.6613140394702609, 0.4354679582717154]\n469 Tolerância: 0.7545839310336604\n=====================================================================\n470 Derivada: [-0.3777455169101245, 0.4355439910333416, -0.589304317634685, -0.12299991275141053, -0.4411975142943163]\n470 Solução: [0.9497955978267512, 0.9028416681700836, 0.8137832455412604, 0.662086201284512, 0.43615215035988375]\n470 Tolerância: 0.9431087146295323\n=====================================================================\n471 Derivada: [0.10453474526178752, -0.36454138755044596, 0.013667804197211808, -0.3874064737212848, -0.3800458973982188]\n471 Solução: [0.9502106014464033, 0.9023631652502472, 0.8144306746011617, 0.6622213330245953, 0.4366368644258809]\n471 Tolerância: 0.6622109974064933\n=====================================================================\n472 Derivada: [-0.44462474142477504, 0.5483412964782133, -0.6730041068985884, -0.08388604494571439, -0.448458166358634]\n472 Solução: [0.9499426290769578, 0.903297658553294, 0.8143956375054413, 0.6632144404401406, 0.4376111031765355]\n472 Tolerância: 1.0767801361109914\n=====================================================================\n473 Derivada: [0.13811225753329381, -0.4192011369616466, 0.05675139420432629, -0.40444276250714495, -0.37437042064706816]\n473 Solução: [0.9504311084071364, 0.9026952328125343, 0.8151350219002274, 0.6633066004016288, 0.4381037940331307]\n473 Tolerância: 0.7083448804465863\n=====================================================================\n474 Derivada: [-0.33687204021026673, 0.37046187432684974, -0.5382177483815269, -0.1413976218656643, -0.43386805244961124]\n474 
Solução: [0.9501782173105788, 0.9034628130193733, 0.8150311069938474, 0.664047157217743, 0.43878928674671785]\n474 Tolerância: 0.8652386323343144\n=====================================================================\n475 Derivada: [0.08599332788566016, -0.33182429068369856, -0.007777027821646243, -0.3745460828409364, -0.3798028574925496]\n475 Solução: [0.9505483159875676, 0.9030558114484575, 0.8156224106724892, 0.6642025012847341, 0.4392659484254345]\n475 Tolerância: 0.6341117198265023\n=====================================================================\n476 Derivada: [-0.39712642566883005, 0.4722997244317071, -0.6137927348565881, -0.10581856751191765, -0.4403550702999155]\n476 Solução: [0.9503278741070014, 0.9039064352404933, 0.8156423469010359, 0.6651626413896731, 0.4402395641489872]\n476 Tolerância: 0.9811361303074181\n=====================================================================\n477 Derivada: [0.11638435711341799, -0.38123774565195845, 0.03138392372147791, -0.38991108207916625, -0.3745316603192492]\n477 Solução: [0.950764170228952, 0.9033875512658979, 0.8163166797396234, 0.6652788971401135, 0.44072335267836943]\n477 Tolerância: 0.6724411531638187\n=====================================================================\n478 Derivada: [-0.3035678476844623, 0.3178193424554365, -0.49633526957785534, -0.15588753407388367, -0.42746596651623747]\n478 Solução: [0.9505510641063077, 0.9040856184271259, 0.816259214058981, 0.6659928456546627, 0.44140914063061415]\n478 Tolerância: 0.8040767054347505\n=====================================================================\n479 Derivada: [0.071034528220423, -0.30500317897519835, -0.024889648767270955, -0.36359047452285154, -0.3791702210337604]\n479 Solução: [0.9508845737045939, 0.9037364516690571, 0.8168045042721404, 0.6661641088146559, 0.4418787687676559]\n479 Tolerância: 0.6120951132649044\n=====================================================================\n480 Derivada: [-0.4810244586242902, 0.6153083501333185, -0.7196616408444925, -0.05443089243418342, -0.44883716821102837]\n480 Solução: [0.9506504511140237, 0.9047417111700741, 0.8168865380266224, 0.6673624660915257, 0.4431284753067076]\n480 Tolerância: 1.154261364882513\n=====================================================================\n481 Derivada: [0.1594140677706264, -0.45080293807484395, 0.08771373847039854, -0.41110422818253767, -0.3661796292342103]\n481 Solução: [0.9511789203678834, 0.9040657132268124, 0.8176771819191517, 0.6674222656559675, 0.4436215825471738]\n481 Tolerância: 0.7344549279514753\n=====================================================================\n482 Derivada: [-0.3663854915442357, 0.4254695126643355, -0.5754796165875291, -0.11646747909475152, -0.43315597031326547]\n482 Solução: [0.9508870244918386, 0.9048911580597131, 0.8175165732671673, 0.6681750199800323, 0.44429207747375016]\n482 Tolerância: 0.920667253698849\n=====================================================================\n483 Derivada: [0.10352193040046132, -0.3570186799267958, 0.017976234216803277, -0.378834895432675, -0.37218190472785295]\n483 Solução: [0.951289547614873, 0.9044237232923816, 0.8181488140568753, 0.6683029749741549, 0.4447679568356666]\n483 Tolerância: 0.6484886437644576\n=====================================================================\n484 Derivada: [-0.43726087407924297, 0.5452552473712444, -0.6650835413246909, -0.07453292954104995, -0.4411594040698219]\n484 Solução: [0.9510241715725867, 0.9053389323107487, 0.8181027324017786, 0.6692741093496537, 
0.4457220364254231]\n484 Tolerância: 1.063491067716426\n=====================================================================\n485 Derivada: [0.13937505619958301, -0.4157440015981706, 0.06431633243880697, -0.39755549815076563, -0.3661475283794289]\n485 Solução: [0.9515045607164726, 0.9047398970047988, 0.8188334150033315, 0.6693559936716592, 0.4462067086222771]\n485 Tolerância: 0.6989416702573569\n=====================================================================\n486 Derivada: [-0.3356207326290246, 0.3768453917826946, -0.5366826715599586, -0.12976361339528353, -0.4270682491463873]\n486 Solução: [0.9512493573664977, 0.9055011470077252, 0.8187156482813366, 0.6700839395300896, 0.44687714477043283]\n486 Tolerância: 0.8613407573204647\n=====================================================================\n487 Derivada: [0.08969255234740103, -0.332160616013482, 0.002186731565757327, -0.3685647741400686, -0.3714456293092496]\n487 Solução: [0.9516180813159193, 0.9050871322950733, 0.8193052654742126, 0.6702265020936342, 0.447346335962122]\n487 Tolerância: 0.6262523922843346\n=====================================================================\n488 Derivada: [-0.40136109572245005, 0.48814456630844916, -0.6200028434654712, -0.09056150522002326, -0.434479647700428]\n488 Solução: [0.951388156560146, 0.9059386182492094, 0.8192996598390953, 0.6711713092539053, 0.4482985281270993]\n488 Tolerância: 0.9903299062306781\n=====================================================================\n489 Derivada: [0.12308092552501648, -0.38682976617263876, 0.0454985367637164, -0.38600306846294075, -0.3657258726999828]\n489 Solução: [0.9518291050295676, 0.9054023266114194, 0.8199808153067697, 0.6712708030950895, 0.44877586172442646]\n489 Tolerância: 0.6705297024725169\n=====================================================================\n490 Derivada: [-0.31031739177825557, 0.3372280898819895, -0.504592384808177, -0.14020017239386107, -0.4216726980138077]\n490 Solução: [0.9516037371239432, 0.9061106330680343, 0.8198975049977542, 0.6719775958229879, 0.4494455257979738]\n490 Tolerância: 0.8136934899834397\n=====================================================================\n491 Derivada: [0.07847455601489628, -0.3116154368511843, -0.010419484120689049, -0.3597146429010252, -0.3704262335927524]\n491 Solução: [0.9519446619928402, 0.9057401432231933, 0.820451866748642, 0.6721316243326979, 0.44990878926014716]\n491 Tolerância: 0.6082608165303395\n=====================================================================\n492 Derivada: [-0.3718650186810919, 0.4415772179060582, -0.5827972651565574, -0.10322781444665452, -0.4285949215811655]\n492 Solução: [0.9517434943077434, 0.9065389620920745, 0.8204785768519788, 0.6730537443889627, 0.45085836822809733]\n492 Tolerância: 0.9312766894145393\n=====================================================================\n493 Derivada: [0.10984379836821745, -0.36298791783463, 0.030418395416973, -0.3761175234267853, -0.364956070886862]\n493 Solução: [0.9521520374190874, 0.9060538308712305, 0.821118857050515, 0.6731671538530765, 0.45132923667221725]\n493 Tolerância: 0.6476175510434347\n=====================================================================\n494 Derivada: [-0.2895161292427133, 0.3050052686822369, -0.4780519160173924, -0.14827880891898815, -0.41684189254304727]\n494 Solução: [0.9519509074171925, 0.9067184815996875, 0.8210631593050005, 0.673855845607398, 0.4519974911965462]\n494 Tolerância: 
0.7753226463716961\n=====================================================================\n495 Derivada: [0.06940093626047883, -0.2946462010220898, -0.020403719111982355, -0.35205126282974675, -0.36916547953086365]\n495 Solução: [0.9522689793365265, 0.9063833928035278, 0.8215883628260157, 0.6740187495722748, 0.4524554473773186]\n495 Tolerância: 0.5935254235678986\n=====================================================================\n496 Derivada: [-0.4667949685624535, 0.6034816703927959, -0.704101767154981, -0.04471544508666625, -0.43892074308458007]\n496 Solução: [0.9520402408991446, 0.9073545167570918, 0.821655611411956, 0.6751790747793552, 0.4536721793044833]\n496 Tolerância: 1.128050467587903\n=====================================================================\n497 Derivada: [0.15881021941856943, -0.4429752924443733, 0.09489928630506483, -0.4014286459865133, -0.35574617945067644]\n497 Solução: [0.9525530771683173, 0.906691511992256, 0.8224291607166917, 0.675228200634553, 0.45415439203492286]\n497 Tolerância: 0.7198291269888161\n=====================================================================\n498 Derivada: [-0.3617892070039943, 0.42877796254117584, -0.5703374913336177, -0.10285612605828476, -0.4241033951751376]\n498 Solução: [0.952262286971628, 0.9075026239779329, 0.8222553949336624, 0.6759632384384834, 0.4548057827443662]\n498 Tolerância: 0.9113012114296031\n=====================================================================\n499 Derivada: [0.10692700831242519, -0.35551259191362306, 0.029329243761367252, -0.3707857001877102, -0.36147300773083657]\n499 Solução: [0.9526597604656509, 0.9070315544390083, 0.8228819864158404, 0.6760762395535377, 0.4552717166501983]\n499 Tolerância: 0.6378303563151172\n=====================================================================\n500 Derivada: [-0.2836221594830022, 0.2989373707342793, -0.47034942273691627, -0.14601315939306403, -0.4127929051139887]\n500 Solução: [0.9524639712658601, 0.907682517436897, 0.8228282829665703, 0.6767551684479244, 0.4559335934954086]\n500 Tolerância: 0.7633816120844584\n=====================================================================\n"
],
[
"df",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
],
[
"np.array(m)",
"_____no_output_____"
],
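[
"# A minimal convergence check (sketch), not part of the original notebook: assuming `m`\n# holds the sequence of iterates returned by steepestDescent and that the objective is the\n# extended Rosenbrock function, whose minimizer is the all-ones vector (1, ..., 1), this\n# reports how far the final iterate still is from the optimum.\nimport numpy as np\n\niterates = np.array(m)            # one row per accepted iterate\nx_final = iterates[-1]            # last solution produced by the method\nx_star = np.ones_like(x_final)    # known Rosenbrock minimizer\nprint(\"final iterate:\", x_final)\nprint(\"distance to optimum:\", np.linalg.norm(x_final - x_star))",
"_____no_output_____"
],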
[
"# Para n = 10\n\nimport numpy as np\nimport sympy as sym \n\nvariaveis = list(sym.symbols(\"x:10\"))\nc = variaveis\n\ndef f1(c):\n fo = 0\n for i in range(1,10):\n fo = fo + 100*(c[i] - c[i-1]**2)**2 + (1 - c[i-1])**2\n return fo\n\nx = []\nfor i in range(1,11):\n if (i%2 != 0):\n x.append(-1.2)\n else: \n x.append(1)\n\neps = 1e-3\nnmax = 100\nd1f = gradiente_simbolico(f1(c),c)\np = Parametros(f1,d1f,c,x,eps,nmax)\nm,df = steepestDescent(p)",
"1 Derivada: [-311.2609414050821, -135.48916022479534, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, 64.00224724854343, 173.30654859542847]\n1 Solução: [-0.963134765625, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 1.0966796875]\n1 Tolerância: 440.13885306532853\n=====================================================================\n2 Derivada: [32.344427502331506, 103.91754146603932, -76.73085751243768, 22.866067573663948, -51.16193967003774, 22.866067573663948, -51.16193967003774, 39.51768451536306, -24.577235216666367, 84.60459004669234]\n2 Solução: [-0.39319895982956155, 0.37797087443505006, -0.25722512348593796, 0.12254547200609522, -0.25722512348593796, 0.12254547200609522, -0.25722512348593796, 0.12254547200609522, -0.5969279429599794, 0.7793459193198942]\n2 Tolerância: 182.5844152190473\n=====================================================================\n3 Derivada: [-45.84945539178375, -44.71655507442835, -3.0442231217130393, 9.240988415872408, -18.948622462987316, 7.674722674472364, -20.893654334488712, -6.714968045612909, -57.87189940972095, 46.86406833498229]\n3 Solução: [-0.499802907896328, 0.03546921188828961, -0.004328010102659485, 0.0471812356182868, -0.0886005664679913, 0.0471812356182868, -0.0886005664679913, -0.007700802641707832, -0.5159238718112987, 0.5004977831796729]\n3 Tolerância: 103.15711759077486\n=====================================================================\n4 Derivada: [19.286337447357656, 49.97932835502163, -15.281771848531005, -2.73192643336334, 1.0962454611246795, -1.08591165730554, 2.9832744412465706, 6.080229002974454, -26.024157009151285, 40.770449847736394]\n4 Solução: [-0.24794432725298465, 0.2811045852219414, 0.012394406947766147, -0.0035810298497623155, 0.015487325479570851, 0.0050227248332213245, 0.02617170465651944, 0.02918561850728886, -0.1980240337452046, 0.24306576717940392]\n4 Tolerância: 74.15273727923295\n=====================================================================\n5 Derivada: [-6.072647046246944, -9.25056711423256, 11.608228382864777, -1.626903854272757, 0.18430630432609624, -0.1791667952863099, 0.8160378924229086, -0.9172210848174172, -18.214131576336595, 14.02897729534454]\n5 Solução: [-0.3256358721459049, 0.07977184160430059, 0.07395427889619427, 0.007424044894010905, 0.011071297621036376, 0.00939712481992577, 0.014154119627083793, 0.004692508509955246, -0.09319039344955124, 0.07882933590800487]\n5 Tolerância: 28.106723241442662\n=====================================================================\n6 Derivada: [1.6515458363451696, 5.234228415043933, -1.4011697450156797, 0.9338773081792933, -0.028232825778919682, 0.008062342991905856, 0.016581328989840154, -0.16737046674071318, -3.2938006531847512, 2.399806486844868]\n6 Solução: [-0.29672556516303983, 0.12381140672332766, 0.018690496311755025, 0.01516931470805357, 0.010193862822608916, 0.01025009174084448, 0.010269173606027466, 0.00905916162371004, -0.006477608845409738, 0.01204099185057847]\n6 Tolerância: 7.042549116345512\n=====================================================================\n7 Derivada: [-1.3097858094129347, -0.22331765922033808, 0.6998299952526117, 0.13941002172181358, 0.014418815367451834, 0.0014378376431198256, 0.000703884938752404, -0.0807446700189547, -0.6535137659215747, 0.46553732942942233]\n7 Solução: [-0.30337852080261385, 0.1027262580787415, 0.024334856856862132, 0.0114073577781321, 
0.010307593688173413, 0.010217614040803845, 0.010202378701649447, 0.00973338347459426, 0.006790875231140163, 0.0023738026335520246]\n7 Tolerância: 1.7103047196380934\n=====================================================================\n8 Derivada: [-0.5065224403265542, 1.3878484436177327, -0.32116704742996816, -0.00959747714071222, -0.001170923199181391, -0.00011185974786583758, -0.0025177800307810555, 0.012579335624601666, 0.27359145280158453, -0.19879212919084455]\n8 Solução: [-0.29426502090801704, 0.10428010409626583, 0.019465434282472622, 0.010437341757850927, 0.010207267458199687, 0.01020760955524991, 0.010197481064746506, 0.010295205519403881, 0.011338029119998776, -0.0008654097416282824]\n8 Tolerância: 1.5493434819554939\n=====================================================================\n9 Derivada: [-1.5014443683076255, -0.44753862463924754, 0.5904831980808952, -0.015468875059349959, 0.0002996748308803296, -5.733898792612685e-05, 0.0017080312430330369, 0.0019684235029851435, -0.157670437756901, 0.11562410365573551]\n9 Solução: [-0.29036964569554474, 0.09360695322371734, 0.02193534687867477, 0.010511150480978964, 0.010216272360732455, 0.010208469804775733, 0.01021684387504182, 0.010198465023267418, 0.009233993289127215, 0.000663387150342324]\n9 Tolerância: 1.6857554657842106\n=====================================================================\n10 Derivada: [-0.5036147735182261, 1.5385363484504797, -0.34343543206754057, 0.03852954556773108, -0.0005697538621838097, 7.872236824901097e-05, -0.0005913632425910281, -0.0051701786720706605, 0.0671535649506537, -0.04957143581224856]\n10 Solução: [-0.27992258405082615, 0.09672093047035273, 0.017826760173277917, 0.010618783034492508, 0.01021418722091798, 0.010208868769901683, 0.010204959380308801, 0.010184768717155339, 0.010331065426840027, -0.0001411262662075935]\n10 Tolerância: 1.6574524721086137\n=====================================================================\n11 Derivada: [-1.4904353520700866, -0.34725027891192584, 0.5235706621520175, -0.03276264629586288, 0.0013419286265634114, -6.275780332805347e-05, 7.889128987181859e-05, 0.0038459094101014266, -0.028823204802606712, 0.02129948317949222]\n11 Solução: [-0.2764184285221959, 0.08601578253801906, 0.02021638659269318, 0.01035069415542016, 0.010218151572742256, 0.010208321019048389, 0.010209074090370775, 0.010220742860747627, 0.009863810397276152, 0.00020379217145087423]\n11 Tolerância: 1.6181721080392273\n=====================================================================\n12 Derivada: [-0.5708865300487709, 1.5789446342935327, -0.3696559634276574, 0.046498035159526094, -0.0017597114913231598, 7.746058275270862e-05, 7.754310866722042e-05, -0.002916708096550709, 0.016742635831131433, -0.012345355307448855]\n12 Solução: [-0.2649563402433366, 0.08868628639195598, 0.01618990322897527, 0.010602652983134927, 0.010207831564994028, 0.010208803653522225, 0.01020846738245306, 0.010191166164844747, 0.010085473227179011, 3.998999367890035e-05]\n12 Tolerância: 1.719950442996181\n=====================================================================\n13 Derivada: [-1.6196888369113198, -0.5204817354197859, 0.5975877583581882, -0.04440338568482101, 0.0024137962274398336, -9.337224492126511e-05, -0.0001297166785057119, 0.00205508118344664, -0.009749609171158313, 0.007158980758049497]\n13 Solução: [-0.26056597752445565, 0.07654352370146617, 0.01903271642428074, 0.010245063113133689, 0.010221364502488334, 0.010208207948357013, 0.010207871042628104, 0.010213596903380232, 0.009956715163536276, 
0.0001349310806380407]\n13 Tolerância: 1.8037550654456251\n=====================================================================\n14 Derivada: [-0.7020186166911442, 1.4927198253428633, -0.34207679820753717, 0.04505170452493446, -0.00220022463047264, 0.00010021847444716125, 0.00010476614462388151, -0.0010586544161488437, 0.004216217951560368, -0.0030745653326343866]\n14 Solução: [-0.24929616994340764, 0.08016503968278059, 0.014874696132970105, 0.010554022217630124, 0.010204569289480025, 0.010208857633166646, 0.01020877361229056, 0.010199297632450489, 0.010024553020318017, 8.511883659399514e-05]\n14 Tolerância: 1.6852661568156477\n=====================================================================\n15 Derivada: [-1.7558261600496579, -0.7256549409561751, 0.6821856751483859, -0.05731198969992637, 0.003500034401645216, -0.0001598777611036134, -0.00012101971635794517, 0.0010230769633859452, -0.0037364279479888186, 0.002709615547689132]\n15 Solução: [-0.2428689975493847, 0.06649878151618943, 0.018006502757184618, 0.010141561543878892, 0.010224712947595924, 0.010207940105629592, 0.010207814449589535, 0.010208989903106149, 0.009985952392099385, 0.00011326732291572108]\n15 Tolerância: 2.0194546781827123\n=====================================================================\n16 Derivada: [-0.768542404515201, 1.5796292969280885, -0.4003034004677124, 0.051446027594062436, -0.00297696797389619, 0.00015647497433101537, 7.026825120996089e-05, -0.0004968611190837788, 0.0016216919250268743, -0.0011650711574312904]\n16 Solução: [-0.23065194541036732, 0.07154789426454172, 0.013259849499926563, 0.0105403388159609, 0.010200359680885257, 0.010209052536340395, 0.01020865650572142, 0.010201871325602121, 0.010011950486952335, 9.441379676602863e-05]\n16 Tolerância: 1.8024396731484782\n=====================================================================\n17 Derivada: [-1.8137762534096424, -0.781488840590017, 0.700806381542814, -0.06406913438973567, 0.0043679484727875625, -0.0002359596789572732, -6.986303378087727e-05, 0.00046760853557454435, -0.0014398521685119086, 0.0010276528635062218]\n17 Solução: [-0.22361572954871692, 0.057085956121279195, 0.016924736589169535, 0.010069336366064087, 0.010227614636700957, 0.010207619965164269, 0.010208013180667618, 0.010206420225007796, 0.009997103453947328, 0.00010508034178645671]\n17 Tolerância: 2.0966082207284513\n=====================================================================\n18 Derivada: [-0.9024457265967314, 1.522382769326542, -0.3980151318572498, 0.05259787942930027, -0.003492971816200126, 0.0002113533761670533, 3.298863044678274e-05, -0.00021989245021313691, 0.0006265700538806612, -0.00044251294406335087]\n18 Solução: [-0.21099546020736565, 0.0625235615794861, 0.01204852031134478, 0.01051512990368798, 0.010197222417102704, 0.010209261774453889, 0.01020849928820247, 0.010203166601164076, 0.010007121956584679, 9.792992513364243e-05]\n18 Tolerância: 1.814732225777096\n=====================================================================\n19 Derivada: [-1.9364237858575142, -1.0114018445743413, 0.7947842416460479, -0.08212290820277042, 0.006149796480536138, -0.0003833247982900287, -3.676495024576565e-05, 0.00027137339139150785, -0.0007464574187092574, 0.000524022010820635]\n19 Solução: [-0.20141138083554977, 0.04635567816500745, 0.01627548765406704, 0.009956534260334816, 0.010234318187123384, 0.01020701717683102, 0.01020814894508127, 0.010205501884753986, 0.010000467709186289, 0.00010262946445658084]\n19 Tolerância: 
2.326184817720052\n=====================================================================\n20 Derivada: [-1.017278868456076, 1.5526663440571595, -0.4443520398944648, 0.0611009195326656, -0.0046763897070485685, 0.0003186487526963405, 1.0715838879959971e-05, -0.00012497240277445365, 0.0003254122591352817, -0.00022598963134100314]\n20 Solução: [-0.18793772900524236, 0.05339302010113263, 0.010745372691441948, 0.010527946097194913, 0.010191527855166528, 0.010209684353772248, 0.010208404755892306, 0.010203613666576579, 0.010005661565737366, 9.898331521136891e-05]\n20 Tolerância: 1.9096683672188508\n=====================================================================\n21 Derivada: [-1.9626580551926125, -1.0289609507849276, 0.7918133275622006, -0.09234589916360589, 0.00782137691134132, -0.000554030268674309, -3.423781278412419e-06, 0.00015255196780166597, -0.00038798064149802203, 0.00026784674525179297]\n21 Solução: [-0.17713410820987144, 0.036903521574158396, 0.015464443427625839, 0.009879047171493996, 0.010241191661869802, 0.010206300266676962, 0.010208290952427639, 0.010204940888139247, 0.0100022056494111, 0.00010138335157937029]\n21 Tolerância: 2.3550679644314174\n=====================================================================\n22 Derivada: [-1.1211203323063352, 1.6694909511189728, -0.5473252377364862, 0.07879668216187789, -0.0070154176216414565, 0.000535343176081074, -1.0816868946182667e-05, -9.217091113335185e-05, 0.0002280040770442247, -0.0001560639313694899]\n22 Solução: [-0.16204042443580716, 0.04481667341735303, 0.009375058706383329, 0.010589226815940673, 0.010181041912673695, 0.010210560997502949, 0.010208317282776826, 0.010203767697957179, 0.010005189387254651, 9.932349501798565e-05]\n22 Tolerância: 2.0856496004825056\n=====================================================================\n23 Derivada: [-1.9684923533077416, -0.9973873647576235, 0.769928716747379, -0.10193349245683184, 0.009922356529845397, -0.0007968405219665187, 2.8256713249318055e-05, 9.715551755317547e-05, -0.00023746708785802375, 0.00016157999169516546]\n23 Solução: [-0.15095512818131337, 0.028309255565712795, 0.014786843893963331, 0.00981010923108812, 0.01025040822460643, 0.010205267687290234, 0.010208424236681201, 0.01020467905584314, 0.01000293495241083, 0.00010086660762063808]\n23 Tolerância: 2.3394494110829136\n=====================================================================\n24 Derivada: [-1.2146626467815052, 1.933872859569725, -0.750363964471943, 0.11501612378900744, -0.01193409239691854, 0.001019804563407542, -4.913545406155101e-05, -8.688985624046713e-05, 0.00021142053694123294, -0.00014298161163684223]\n24 Solução: [-0.13293304242715315, 0.037440609613176384, 0.007737935964757591, 0.010743338226969759, 0.010159566337431331, 0.010212562980154917, 0.010208165538744958, 0.01020378957051398, 0.010005109028532577, 9.938729861463987e-05]\n24 Tolerância: 2.406592592867631\n=====================================================================\n25 Derivada: [-1.9462036849880995, -0.8536382365621149, 0.6964102747095362, -0.1048038482580641, 0.01198336866003407, -0.0010935096834887442, 6.49126994466745e-05, 6.36872449807091e-05, -0.00015624289015943598, 0.00010500702672495282]\n25 Solução: [-0.12270211925479917, 0.021151885576273184, 0.01405814025535379, 0.009774574489000824, 0.010260085426223736, 0.010203973317694575, 0.010208579399380975, 0.01020452143087538, 0.010003328264732657, 0.00010059161150562401]\n25 Tolerância: 
2.238866449860008\n=====================================================================\n26 Derivada: [-1.3673411655702188, 2.1417071692805636, -0.9744174041371201, 0.16335763873545384, -0.01994400688228673, 0.0019226312526168382, -0.00012877807866803342, -8.378903536287313e-05, 0.0002099188897326946, -0.00014051025252535126]\n26 Solução: [-0.10060776589739032, 0.03084284700208821, 0.0061521154081874245, 0.010964364270250819, 0.010124043765410361, 0.010216387429091603, 0.010207842475424854, 0.010203798418938953, 0.010005102018246429, 9.9399515132892e-05]\n26 Tolerância: 2.726372682698416\n=====================================================================\n27 Derivada: [-1.9547530536335567, -0.7974018721074145, 0.6631669715985373, -0.1157542843482152, 0.015685611555222558, -0.0016354575324223283, 0.00012510425474097225, 4.642167044677237e-05, -0.0001232667900902613, 8.206683017978125e-05]\n27 Solução: [-0.09009232480474826, 0.014372198605521375, 0.013645803940980219, 0.009708073835639784, 0.010277421748025603, 0.010201601568628363, 0.010208832834184141, 0.010204442792623995, 0.01000348765178486, 0.00010048009935031109]\n27 Tolerância: 2.215930388293184\n=====================================================================\n28 Derivada: [-1.5710306751198435, 2.552187327017693, -1.410584524397772, 0.26401140618315483, -0.03840819173362933, 0.004192398270911182, -0.00034437785470734644, -9.155572844314619e-05, 0.0002585206054229871, -0.00017178058768155605]\n28 Solução: [-0.060742394922310816, 0.02634490737861854, 0.003688585007066633, 0.011446083720262644, 0.010041907804996749, 0.010226157388512146, 0.01020695443778117, 0.010203745787562356, 0.010005338459302085, 9.924789474678165e-05]\n28 Tolerância: 3.3230620286323376\n=====================================================================\n29 Derivada: [-1.977971173124457, -0.8078301409691243, 0.6620315803155458, -0.13470208555918645, 0.022371183621088114, -0.0026821589180886785, 0.00024672100698565347, 3.221547853234086e-05, -0.00011236216190668675, 7.446789408768403e-05]\n29 Solução: [-0.0498111512111498, 0.008586768018265939, 0.013503443148017926, 0.009609090293451143, 0.01030915230314334, 0.010196986648590034, 0.010209350621584675, 0.010204382833036141, 0.01000353967090986, 0.00010044314541790576]\n29 Tolerância: 2.2409591043728385\n=====================================================================\n30 Derivada: [-1.9211907986645869, 3.005843831949745, -2.0902634627197183, 0.4412365998546705, -0.07897887796420877, 0.009854499190962138, -0.00095571030505575, -8.52896284035537e-05, 0.0003536273704472775, -0.00023456948057925045]\n30 Solução: [-0.00997164393996628, 0.02485776084784425, 0.00016906683550998482, 0.012322206030422061, 0.009858560836165858, 0.010251009624845483, 0.010204381265364871, 0.010203733961703397, 0.010005802824805685, 9.894324276599317e-05]\n30 Tolerância: 4.158881842830666\n=====================================================================\n31 Derivada: [-2.0009074657687407, -0.7918845538887629, 0.6056766134045373, -0.13984488072473467, 0.030670852810660183, -0.004307025632336439, 0.0004712588894907957, 4.602215795838915e-06, -9.927119653570357e-05, 6.630730239242169e-05]\n31 Solução: [0.001988894479454365, 0.006144621757336804, 0.013182181654687918, 0.00957524965925651, 0.010350250628179365, 0.010189659593261124, 0.01021033112200035, 0.0102042649396146, 0.010003601287221114, 0.00010040357522565404]\n31 Tolerância: 2.2401053707328753\n=====================================================================\n32 
Derivada: [-2.6267831721742607, 3.733732104458763, -3.586388300389706, 0.8138240966310714, -0.1944776408436414, 0.02820608071056553, -0.003227482619301794, 2.4804169943631882e-05, 0.0005787271399640577, -0.00038887334067317855]\n32 Solução: [0.06866952657843706, 0.03253432918063176, -0.007002109783231842, 0.014235607622861558, 0.009328138467863175, 0.01033319206367468, 0.010194626327465317, 0.010204111569874334, 0.010006909519234572, 9.819387142278159e-05]\n32 Tolerância: 5.865480713641254\n=====================================================================\n33 Derivada: [-2.003952273148086, -1.0125517990650115, 0.4874000274568044, -0.09992906502603188, 0.03913096495181328, -0.006766181465220518, 0.0009198127556147156, -6.157454460047063e-05, -7.168823153709986e-05, 5.1078608806650466e-05]\n33 Solução: [0.08309887749980446, 0.012024326165416387, 0.012698509542639345, 0.00976513834205123, 0.010396436055505248, 0.010178251434771426, 0.01021235541900201, 0.010203975316499398, 0.010003730476107328, 0.00010033001648263181]\n33 Tolerância: 2.3000452897436348\n=====================================================================\n34 Derivada: [-4.138345497346043, 5.046543334675493, -6.260767115018711, 1.0745649179902406, -0.45515307856718124, 0.08207674035932998, -0.011645807970622639, 0.0008728463207793649, 0.0007902619706962673, -0.0005758491785412857]\n34 Solução: [0.20271956372653957, 0.07246595577269063, -0.016395559479135235, 0.01573014234568033, 0.00806061551577909, 0.010582140928972212, 0.010157449603878035, 0.010207650847787195, 0.010008009717467391, 9.72810126122153e-05]\n34 Tolerância: 9.119179566972813\n=====================================================================\n35 Derivada: [-1.4634749424004285, -2.32359624319223, 0.18361632826973673, 0.06519941074782704, 0.0021531962610451677, -0.003468579278348344, 0.0009745527103674559, -0.00016095032813259053, 3.5977923783086455e-05, -1.2497126054483026e-05]\n35 Solução: [0.22242115972220555, 0.04844066401825408, 0.013410338651447008, 0.010614415807396714, 0.010227481978684371, 0.010191394728531067, 0.010212892293191302, 0.010203495451484657, 0.01000424748396432, 0.00010002248209013402]\n35 Tolerância: 2.752969664196291\n=====================================================================\n36 Derivada: [-3.452451679021448, 2.0130874780541044, -1.0329930750739817, -0.08867009356387726, -8.555169839923743e-05, 0.005499801201826984, -0.001689301238838363, 0.000300650278133717, -6.627144277609808e-05, 2.1384275920650203e-05]\n36 Solução: [0.24117907829545324, 0.07822308656893574, 0.011056857881388127, 0.009778730000692388, 0.010199883637936112, 0.010235852836957162, 0.010200401078031563, 0.01020555841345413, 0.010003786341142394, 0.0001001826625388304]\n36 Tolerância: 4.128790873007555\n=====================================================================\n37 Derivada: [-1.4266412437183957, -2.2959786841186407, 0.4106598169373299, -0.02374670951292708, -0.0018218772648125325, -0.0005232028334395886, 0.00027871546800188607, -6.592018595844712e-05, 1.4505147583364797e-05, -3.5659287653637406e-06]\n37 Solução: [0.26014396178617166, 0.06716486677982018, 0.01673125831819589, 0.01026580937207599, 0.010200353587451244, 0.010205641526644391, 0.010209680686887487, 0.010203906892150906, 0.010004150381050222, 0.00010006519520284049]\n37 Tolerância: 2.7342334250926896\n=====================================================================\n38 Derivada: [-3.567021491423546, 1.924636573657124, -1.5974660169660786, 0.06612069207855431, 
0.00150696323436246, 0.0007240852052765936, -0.00045960995317540637, 0.00011682457466802992, -2.6696831184245065e-05, 6.31921990466508e-06]\n38 Solução: [0.27842976874301145, 0.09659330450350881, 0.01146767423879896, 0.010570180038440413, 0.010223705285791737, 0.010212347617649171, 0.010206108284038342, 0.010204751816409407, 0.010003964463020895, 0.00010011090107690827]\n38 Tolerância: 4.357079400596003\n=====================================================================\n39 Derivada: [-1.4284010827317584, -2.1922880063499957, 0.24841865845644895, -0.04264263199001463, 0.0014113663757385833, 6.178945404283764e-05, -9.924683298800918e-06, -2.8184904940081745e-06, 1.3774444320145164e-06, -2.062071545738675e-07]\n39 Solução: [0.2954114384409507, 0.08743060597169389, 0.01907279809886696, 0.010255396470195343, 0.010216531022346896, 0.010208900434665066, 0.010208296368532219, 0.010204195644728247, 0.010004091559751386, 0.00010008081690011604]\n39 Tolerância: 2.628685712623135\n=====================================================================\n40 Derivada: [-4.020525569612772, 2.4077143937795134, -1.97685571728099, 0.11722663128511827, -0.005830880645974083, -4.081815755278828e-05, 2.4818357180690642e-05, 5.425180098071747e-06, -3.1999678514048227e-06, 5.300228205599999e-07]\n40 Solução: [0.3179045707251788, 0.12195271935293772, 0.015160932017216338, 0.010926892994452165, 0.010194306136791443, 0.010207927431787682, 0.010208452653217955, 0.010204240027696232, 0.010004069869037064, 0.00010008406405867805]\n40 Tolerância: 5.0875779177761284\n=====================================================================\n41 Derivada: [-1.4160599797491606, -2.121909799978118, 0.059182454076793345, -0.037376749403351596, 0.0008395708785930017, -0.00010373677375281193, 4.4365598166548614e-06, 1.4509453563418817e-06, -4.973090286381474e-07, 5.141948146436537e-08]\n41 Solução: [0.33410053554417557, 0.11225367455377708, 0.02312434005802111, 0.010454666574480376, 0.010217794791737383, 0.010208091860400871, 0.010208352676925601, 0.010204218173333045, 0.01000408275953256, 0.00010008192895698391]\n41 Tolerância: 2.5519849853436636\n=====================================================================\n42 Derivada: [-4.5592154290396145, 2.9419738213881903, -2.363604792108678, 0.11673545396873528, -0.005534812587574198, 0.0003619092087115833, -2.0724145133432792e-05, -3.816318832429444e-06, 1.5721811716182124e-06, -1.8680773269594275e-07]\n42 Solução: [0.36158509813940465, 0.15343820314221956, 0.02197565717249741, 0.011180118619784686, 0.010201499409694378, 0.01021010530608284, 0.010208266566938925, 0.010204190011673907, 0.010004092411892856, 0.00010008093094702873]\n42 Tolerância: 5.919623369708069\n=====================================================================\n43 Derivada: [-1.533799989303155, -1.854915727061386, -0.23761936828743135, -0.04016356321948521, -0.00022622970164980216, 5.089729452734115e-05, -2.376991021173225e-06, -1.5820439374925055e-06, 4.7124892852339973e-07, -4.29323949967253e-08]\n43 Solução: [0.3766118091482022, 0.14374175622114033, 0.029765858513675914, 0.010795370419448279, 0.01021974158985362, 0.01020891249008733, 0.010208334871616489, 0.010204202589873184, 0.010004087230143389, 0.00010008154664634303]\n43 Tolerância: 2.4189524367917246\n=====================================================================\n44 Derivada: [-6.951615132890872, 6.42375357536089, -5.266031452271569, 0.19726950841607893, -0.007836166593618979, -0.0005259291538957794, 3.2819783176527006e-05, 
1.4424661866838107e-05, -4.862932384685147e-06, 5.05479735558173e-07]\n44 Solução: [0.45693409862709056, 0.24088028733800246, 0.04220954857657973, 0.01289866248746087, 0.010231588823985427, 0.010206247092217185, 0.01020845935027226, 0.010204285438609656, 0.010004062551702185, 0.00010008379493704781]\n44 Tolerância: 10.833264842672351\n=====================================================================\n45 Derivada: [-0.8646541418085008, -2.2269248630231093, -1.026057630629591, -0.16737507989869496, -0.001305459768741378, -0.0003408773764909112, 1.0804427269361927e-05, 7.391261943184846e-06, -2.187683959664524e-06, 1.9643830318633038e-07]\n45 Solução: [0.47475440109177663, 0.22441314560428924, 0.05570889678186573, 0.012392966726140354, 0.010251676653388015, 0.010207595299276732, 0.0102083752175273, 0.010204248461327038, 0.010004075017715379, 0.00010008249915159288]\n45 Tolerância: 2.6053083559993775\n=====================================================================\n46 Derivada: [-5.727840057903563, 2.8835363018749387, -1.2998919335925763, -0.05052605707942667, -0.013384024024235924, 0.0007751296788001197, -5.2032990684861e-05, -1.825465503942081e-05, 6.300742524034653e-06, -6.656431177226585e-07]\n46 Solução: [0.49027004548213915, 0.2643738334539471, 0.0741208195971184, 0.015396401387408441, 0.010275102262031202, 0.010213712117433979, 0.010208181339254767, 0.010204115830039728, 0.010004114274251276, 0.00010007897419668952]\n46 Tolerância: 6.543346620088549\n=====================================================================\n47 Derivada: [-1.3949028351310417, -2.2584063014558753, 0.14241694626424983, -0.12608698907637234, -0.007382121026016458, 0.00023982089030104292, -1.769080426299724e-05, -9.462125731593551e-06, 2.8389536588378415e-06, -2.597372593150793e-07]\n47 Solução: [0.5049532292243236, 0.2569819557269728, 0.07745306210266578, 0.015525923750527089, 0.010309411893929268, 0.010211725090669477, 0.010208314724606864, 0.010204162625420078, 0.010004098122445489, 0.00010008068055722078]\n47 Tolerância: 2.661275636033466\n=====================================================================\n48 Derivada: [-3.7098636531945886, 2.033176846928548, -3.6334042957100188, 0.27087686667962185, 0.0012471325201643887, -0.000819185824924587, 4.2529231305050985e-05, 1.5117454750808446e-05, -5.471910955430557e-06, 5.980723992993209e-07]\n48 Solução: [0.5238538901983892, 0.28758292001671915, 0.07552334029659502, 0.01723437782614688, 0.010409438192011278, 0.010208475564445915, 0.010208554431549392, 0.01020429083537567, 0.010004059655177895, 0.00010008419994635457]\n48 Tolerância: 5.583180070321869\n=====================================================================\n49 Derivada: [-0.4839065369247848, -2.433019510375029, -1.1265355901830247, -0.16543930254463857, 0.005295730640400453, -0.0003895507379808663, 1.2520405195323714e-05, 7.82746240758636e-06, -2.472996048228976e-06, 2.3531220424630184e-07]\n49 Solução: [0.5333640387234242, 0.28237091882220017, 0.08483748705073448, 0.01653999132709023, 0.010406241197025505, 0.010210575528108442, 0.01020844540886172, 0.01020425208213473, 0.010004073682293382, 0.00010008266680177629]\n49 Tolerância: 2.7295109946392504\n=====================================================================\n50 Derivada: [-4.545182625794986, 1.7228754033887306, -1.736536374035036, -0.24365274640309784, -0.01644808338589193, 0.0005911749163380262, -2.7415362388434616e-05, -7.100698837553265e-06, 2.7935274022660773e-06, -3.2787576110041017e-07]\n50 Solução: 
[0.5381487591202635, 0.3064278744301564, 0.09597632772515159, 0.018175804743364124, 0.010353878625996155, 0.010214427287114356, 0.010208321610909961, 0.010204174686571374, 0.010004098134524816, 0.0001000803401063974]\n50 Tolerância: 5.167415300355855\n=====================================================================\n51 Derivada: [-0.8412482359411086, -2.563233206938113, -0.30580972701148035, -0.2949930429848922, -0.01271047879153659, 0.00011578162089319333, -7.380094700505657e-06, -3.75210650677138e-06, 1.26874340588461e-06, -1.311190985404942e-07]\n51 Solução: [0.5498002282537712, 0.3020113237134617, 0.10042789801991914, 0.018800402848157222, 0.010396042902253934, 0.010212911824071986, 0.010208391889548895, 0.010204192889046422, 0.010004090973382793, 0.00010008118060822639]\n51 Tolerância: 2.7310373710213596\n=====================================================================\n52 Derivada: [-3.888758715051054, 1.5648791613513318, -2.576622958962099, 0.09206887702855053, -0.011423835952041952, -0.0005250910204898165, 8.785227379723426e-06, 2.299061623529941e-06, -1.024187173348024e-06, 1.3252345611516891e-07]\n52 Solução: [0.556885937272318, 0.32360105653557225, 0.10300368795690548, 0.02128508546118915, 0.010503101378403351, 0.010211936612665538, 0.010208454050991075, 0.010204224492482575, 0.010004080286945414, 0.0001000822850049304]\n52 Tolerância: 4.921268582806373\n=====================================================================\n53 Derivada: [-0.47296112476081476, -2.662865550660417, -0.6826405779031572, -0.23585422271393353, -0.0036314132676622257, -0.000381626934928464, -1.1494950580212548e-06, 1.2126481170604553e-06, -4.682148020494618e-07, 5.407309564692264e-08]\n53 Solução: [0.5668546790955691, 0.31958952548230346, 0.10960880052260422, 0.02104906905279077, 0.010532386114120256, 0.010213282671189743, 0.01020843153026659, 0.010204218598891987, 0.010004082912425228, 0.00010008194528415667]\n53 Tolerância: 2.7993184656636374\n=====================================================================\n54 Derivada: [-3.836283913298388, 1.2818665691158042, -2.2039810281426133, -0.12331867531181254, -0.012994336076915348, 3.763527518522991e-05, -1.0380451669525503e-05, -5.049972859050289e-07, 2.260754450755689e-07, -3.42118004620362e-08]\n54 Solução: [0.5701455462966636, 0.33811776478743577, 0.1143586189967839, 0.022690144577045435, 0.010557653516007065, 0.010215938034384435, 0.010208439528462183, 0.010204210161276915, 0.01000408617026748, 0.00010008156904313472]\n54 Tolerância: 4.6079458424374415\n=====================================================================\n55 Derivada: [-0.481658442535263, -2.696854694132048, -0.5937784490810223, -0.3257329920897556, -0.009285972627110492, -0.00012247492634286095, -4.721554929469629e-06, -3.548104603248725e-07, 1.0332725774574603e-07, -1.4352467145317682e-08]\n55 Solução: [0.5799797701954997, 0.33483172988125515, 0.12000847270662214, 0.023006269110925813, 0.010590964191985486, 0.010215841557238575, 0.010208466138506747, 0.010204211455825622, 0.010004085590728375, 0.00010008165674428337]\n55 Tolerância: 2.8220172163837938\n=====================================================================\n56 Derivada: [-3.5757773518441525, 1.0452230631812327, -2.2205889814878397, -0.10404962154922265, -0.01732391270714647, -0.00021716895353186827, -2.0250583705264114e-06, -3.457868794076724e-08, -3.603893867923716e-08, 6.092178445743501e-09]\n56 Solução: [0.5829783762220254, 0.35162123054052447, 0.12370509146921152, 0.025034147845859594, 
0.010648774812588835, 0.010216604035417711, 0.01020849553295272, 0.010204213664728633, 0.010004084947455652, 0.00010008174609680099]\n56 Tolerância: 4.338296536122815\n=====================================================================\n57 Derivada: [-0.41455938286229355, -2.7068079377204235, -0.6566906913607837, -0.33970691347104304, -0.011216163164224684, -0.00029649568215612404, -3.2724657424293424e-06, -3.859149520779592e-08, -1.7669090148246178e-08, 2.599056258406618e-09]\n57 Solução: [0.5921447976561962, 0.34894182571547494, 0.12939751927820134, 0.02530087661203802, 0.0106931842567844, 0.010217160742940193, 0.01020850072414239, 0.01020421375337029, 0.010004085039840627, 0.00010008173047964434]\n57 Tolerância: 2.8364481372029196\n=====================================================================\n58 Derivada: [-3.6877518504688283, 1.1971843771868507, -2.3092549038265235, -0.13549810104415888, -0.01969255965008282, -0.00023066664404432224, -6.783164006984865e-06, -7.463169524740687e-08, 3.643910979650878e-09, -1.0772548617032918e-09]\n58 Solução: [0.5947256727204023, 0.3657932911480606, 0.13348580361162418, 0.027415751195414874, 0.010763011444452304, 0.010219006602289554, 0.010208521097159098, 0.01020421399362496, 0.010004085149841066, 0.00010008171429899625]\n58 Tolerância: 4.514883159064987\n=====================================================================\n59 Derivada: [-0.23384084349143563, -2.8731773882035263, -0.6183274420315754, -0.3901609931832886, -0.01353040171952867, -0.00033147634616215865, -5.7573759065787655e-06, -1.0769362256135562e-07, 9.643202903949002e-10, -4.875717572738747e-10]\n59 Solução: [0.6041791381573952, 0.3627243370561509, 0.13940552443442167, 0.027763097401704833, 0.010813492859570924, 0.010219597910825313, 0.01020853848564105, 0.010204214184941562, 0.010004085140499986, 0.00010008171706051384]\n59 Tolerância: 2.973981750451795\n=====================================================================\n60 Derivada: [-3.216374161434544, 0.6566880324337454, -2.1140688407446846, -0.18779693023820432, -0.022097232584519715, -0.00029851294463723727, -6.782631398902228e-06, -1.1802079190326165e-07, -2.0653805457354277e-09, -4.960049732050109e-12]\n60 Solução: [0.6052923941261654, 0.37640279386034253, 0.14234922197339034, 0.029620553301869416, 0.010877907613850907, 0.010221175984250645, 0.010208565895023418, 0.010204214697643134, 0.010004085135909106, 0.00010008171938171727]\n60 Tolerância: 3.9091360996445017\n=====================================================================\n61 Derivada: [-0.3535434345109536, -2.694604227118184, -0.7222757531678452, -0.4072640393882345, -0.01663188941668271, -0.0003941527987469731, -6.469204754194768e-06, -1.291005454276406e-07, -2.230395219318844e-09, -2.3603927840065708e-11]\n61 Solução: [0.6135374939052335, 0.3747193894803244, 0.1477685878981509, 0.0301019663310445, 0.010934553351677435, 0.010221941215187826, 0.01020858328214004, 0.010204215000186668, 0.010004085141203661, 0.00010008171939443224]\n61 Tolerância: 2.8414270952156855\n=====================================================================\n62 Derivada: [-3.4048189201359804, 0.9188282187208188, -2.2600470718835606, -0.20136676890913702, -0.026484963909219165, -0.0003667774665541135, -8.288087080643436e-06, -1.3375813110588775e-07, -2.6496551520254798e-09, -4.6699602518351924e-11]\n62 Solução: [0.6154795659942219, 0.3895212925833906, 0.1517361671086676, 0.032339134516160536, 0.01102591504891263, 0.010224106361177036, 0.01020861881854311, 
0.010204215709357143, 0.010004085153455588, 0.0001000817195240925]\n62 Tolerância: 4.193578436740116\n=====================================================================\n63 Derivada: [-0.13501606893228768, -2.90153742756031, -0.6650756360485945, -0.4579564666933519, -0.019772990631362477, -0.00048101769993757343, -7.927157722444067e-06, -1.5261048759429574e-07, -2.6770923712430075e-09, -4.993735286396017e-11]\n63 Solução: [0.6242077394955471, 0.3871658979797361, 0.15752974480758786, 0.032855333508725465, 0.011093808633152572, 0.010225046586616202, 0.010208640064860088, 0.010204216052242978, 0.010004085160247917, 0.00010008171964380583]\n63 Tolerância: 3.014894643607689\n=====================================================================\n64 Derivada: [-3.3849691809286355, 0.9243868253300214, -2.2884539012391354, -0.22704842289442578, -0.030742449238439357, -0.00044715015163619004, -9.823331745172081e-06, -1.6282461597072473e-07, -3.0680491645493357e-09, -5.3390195042801736e-11]\n64 Solução: [0.6248505161909035, 0.4009793696197327, 0.1606959984460028, 0.035035546179750944, 0.011187942743824342, 0.01022733658787323, 0.010208677804014283, 0.010204216778782164, 0.010004085172992864, 0.0001000817198815447]\n64 Tolerância: 4.195476719199002\n=====================================================================\n65 Derivada: [-0.03663283947086882, -2.9756938733569562, -0.6569800153863268, -0.49629911551518835, -0.023363460632747694, -0.0005741655591848804, -9.527464713027234e-06, -1.8299023168272255e-07, -3.182947167824652e-09, -5.748963111118499e-11]\n65 Solução: [0.6335278053509988, 0.3986097256583154, 0.166562396386191, 0.035617579490393396, 0.011266750291920927, 0.010228482846806866, 0.010208702985894979, 0.010204217196179251, 0.010004085180857736, 0.00010008172001840921]\n65 Tolerância: 3.087811154619018\n=====================================================================\n66 Derivada: [-3.0271355444508004, 0.529953794852652, -2.1458883116584553, -0.27738434784302435, -0.03401605242277897, -0.0005421884449146874, -1.1393779894550149e-05, -1.937202922519532e-07, -3.601605217404616e-09, -6.248097525474172e-11]\n66 Solução: [0.6336753741623439, 0.410596773732141, 0.1692089223270783, 0.037616831298499014, 0.01136086579495812, 0.010230795769591669, 0.010208741365574609, 0.010204217933322518, 0.010004085193679667, 0.00010008172024999587]\n66 Tolerância: 3.7586349566853974\n=====================================================================\n67 Derivada: [-0.15238953404497124, -2.8058964806162194, -0.7491977636855678, -0.5154613084734649, -0.02755176331944347, -0.0006648365466641881, -1.1299666586188362e-05, -2.146401098801487e-07, -3.763471158953376e-09, -6.739288294088652e-11]\n67 Solução: [0.6414353651820541, 0.4092382495998283, 0.17470985671975744, 0.03832789957299895, 0.011448065148092684, 0.010232185656962666, 0.010208770573262327, 0.010204218429919946, 0.010004085202912297, 0.00010008172041016438]\n67 Tolerância: 2.953647714515551\n=====================================================================\n68 Derivada: [-3.34233166755358, 0.942952300580032, -2.347419579298634, -0.28032784173223346, -0.040413441651775205, -0.0006423140448701473, -1.3606558564173421e-05, -2.319337619183237e-07, -4.314960586103769e-09, -7.492179793233689e-11]\n68 Solução: [0.6421608524657155, 0.42259639934885573, 0.17827659412011598, 0.04078187797027253, 0.011579231990067574, 0.010235350772358162, 0.01020882436806175, 0.010204219451766172, 0.010004085220829214, 0.00010008172073100452]\n68 Tolerância: 
4.20130170973761\n=====================================================================\n69 Derivada: [0.15231473048848443, -3.114897129731972, -0.638235664656218, -0.5751142621484387, -0.031727336876518575, -0.0007983539665624359, -1.3442992483708727e-05, -2.5661749711997084e-07, -4.50725582209913e-09, -8.077302021680666e-11]\n69 Solução: [0.6507288413595906, 0.42017916322676335, 0.18429414919400164, 0.04150049182236932, 0.01168283090055186, 0.010236997329357952, 0.010208859248155726, 0.010204220046322935, 0.010004085231890515, 0.0001000817209230648]\n69 Tolerância: 3.2349485846852457\n=====================================================================\n70 Derivada: [-2.68940185940572, 0.1991502335776829, -2.0494147208673086, -0.3547480536503745, -0.04321207254503067, -0.0007682849583559997, -1.5447793202193194e-05, -2.700372544461871e-07, -4.958560844369429e-09, -8.697518746880739e-11]\n70 Solução: [0.6502268274773654, 0.4304455478096202, 0.18639770912389883, 0.043396010020368324, 0.011787400980589214, 0.010239628622948917, 0.010208903554893648, 0.010204220892108143, 0.010004085246745972, 0.00010008172118928447]\n70 Tolerância: 3.405929818757202\n=====================================================================\n71 Derivada: [0.5339521557140614, -3.5126056787615987, -0.4876792466408437, -0.6366510257767009, -0.03558319106323536, -0.0009425867704322757, -1.5740929468217868e-05, -3.016835894292802e-07, -5.289896508841024e-09, -9.504126999293483e-11]\n71 Solução: [0.6590908228635903, 0.42978916886594376, 0.19315237190019488, 0.04456522357610076, 0.011929823582971517, 0.010242160812142718, 0.010208954469251127, 0.010204221782123508, 0.010004085263088885, 0.00010008172147594586]\n71 Tolerância: 3.6425164635853826\n=====================================================================\n72 Derivada: [-2.314485172793894, -0.21027019580236583, -1.8922385478154133, -0.40931942446417957, -0.04715264711841502, -0.0009011662600347664, -1.7653156655209057e-05, -3.1329751742914524e-07, -5.7071696278935234e-09, -1.0057831650867044e-10]\n72 Solução: [0.657722049026921, 0.4387936511967535, 0.19440252621897633, 0.04619726355917091, 0.012021040259281081, 0.010244577111236844, 0.01020899482075489, 0.010204222555482319, 0.010004085276649411, 0.00010008172171958193]\n72 Tolerância: 3.0251261904175917\n=====================================================================\n73 Derivada: [0.7506452728652135, -3.7197143902259597, -0.41597923393828573, -0.6877934263215741, -0.040559261084686715, -0.0011030821322727327, -1.8453462054944525e-05, -3.5374345070532387e-07, -6.214255961970849e-09, -1.1154478382624511e-10]\n73 Solução: [0.6670455366614667, 0.43964068689761754, 0.20202506919723667, 0.047846133311040775, 0.012210986225456338, 0.010248207297587082, 0.010209065933324424, 0.010204223817545072, 0.010004085299639719, 0.00010008172212474361]\n73 Tolerância: 3.8791092098159092\n=====================================================================\n74 Derivada: [-2.4761674957801603, 0.016939011865204634, -2.0067578391342202, -0.42368899028446905, -0.0543437041731381, -0.0010532723298641022, -2.0681964342614267e-05, -3.673241279547601e-07, -6.697574030155393e-09, -1.1810272204648697e-10]\n74 Solução: [0.6651212750977253, 0.4491760875561558, 0.20309142221392418, 0.049609275639257704, 0.01231495894063925, 0.010251035022779676, 0.010209113238341899, 0.010204224724358116, 0.010004085315569818, 0.00010008172241068605]\n74 Tolerância: 
3.2157792415975655\n=====================================================================\n75 Derivada: [0.47472469478924495, -3.3761186712236224, -0.5709264896131465, -0.6939543168249955, -0.04698239815929918, -0.001254767275423431, -2.1424061462153865e-05, -4.0591893762237374e-07, -7.179861876804687e-09, -1.2858597867038135e-10]\n75 Solução: [0.6732824716780554, 0.44912025829341645, 0.20970549224036752, 0.05100571152032224, 0.012494070270311458, 0.010254506501405938, 0.01020918140399586, 0.010204225935021136, 0.010004085337644342, 0.00010008172279994062]\n75 Tolerância: 3.5260855617875193\n=====================================================================\n76 Derivada: [-2.296458339195965, -0.15892435803747418, -1.9473988793025439, -0.4634878391385481, -0.06006860491160159, -0.0012227823675456662, -2.3747542574496794e-05, -4.2411035858225254e-07, -7.709074060699688e-09, -1.363122600905875e-10]\n76 Solução: [0.6720655260493232, 0.45777485937931683, 0.21116904891540123, 0.05278464714694881, 0.012614508546842864, 0.010257723067907877, 0.010209236324075293, 0.010204226975584818, 0.01000408535604975, 0.00010008172312956776]\n76 Tolerância: 3.051191727904903\n=====================================================================\n77 Derivada: [0.9370115297420938, -3.86491677751372, -0.373701375841307, -0.7695180021641423, -0.052279024581349455, -0.0014803364378948025, -2.5024834451610467e-05, -4.764119706374137e-07, -8.409126558549685e-09, -1.507595888405877e-10]\n77 Solução: [0.6813163958239163, 0.45841505759895024, 0.2190137953774354, 0.05465172462394736, 0.012856484128151806, 0.010262648826956829, 0.010209331986783417, 0.01020422868403719, 0.010004085387104369, 0.00010008172367867721]\n77 Tolerância: 4.068183656912584\n=====================================================================\n78 Derivada: [-2.651196745116991, 0.2952856858408168, -2.155673370210602, -0.4610032249175502, -0.0696523481632054, -0.001421848070860078, -2.7906016321195548e-05, -4.965123482497669e-07, -9.03936551169748e-09, -1.5972747777204255e-10]\n78 Solução: [0.6789143887286302, 0.4683226811741197, 0.2199717700957786, 0.05662436598691696, 0.012990500182376458, 0.010266443634719987, 0.010209396137360015, 0.010204229905308112, 0.010004085408660967, 0.00010008172406514589]\n78 Tolerância: 3.461263200341243\n=====================================================================\n79 Derivada: [0.09372124294287687, -2.8783046834712707, -0.8004927035880751, -0.7202468288241953, -0.0613467219133274, -0.001634469906967445, -2.8714514736960517e-05, -5.36574492115971e-07, -9.55353281114615e-09, -1.7056262305303882e-10]\n79 Solução: [0.6857106694473138, 0.467565723239225, 0.22549778825671885, 0.057806136949230015, 0.013169052344415925, 0.010270088508925072, 0.010209467673778807, 0.01020423117810588, 0.01000408543183317, 0.00010008172447460354]\n79 Tolerância: 3.075179942345921\n=====================================================================\n80 Derivada: [-3.227831056216587, 1.0213036226531855, -2.4849371857339992, -0.4361629660268464, -0.08119650204353243, -0.0016449687734459983, -3.2872079850240765e-05, -5.808687116096167e-07, -1.0601847913135377e-08, -1.8714809910114205e-10]\n80 Solução: [0.6853331302606543, 0.47916045646121624, 0.22872242927459074, 0.06070752187979623, 0.013416176590404671, 0.01027667267725148, 0.010209583345041785, 0.010204233339599807, 0.01000408547031786, 0.00010008172516168442]\n80 Tolerância: 4.2229966784316835\n=====================================================================\n81 Derivada: 
[-0.47176166174060086, -2.1944636331399963, -1.101752803228635, -0.6996235889699653, -0.07128003635356724, -0.0018509875962475983, -3.3339341659895105e-05, -6.16032521119636e-07, -1.1021625867231686e-08, -1.962943974032072e-10]\n81 Solução: [0.6912434654465978, 0.4772903936755964, 0.23327248515667204, 0.06150616012325359, 0.013564851826080086, 0.010279684705034889, 0.010209643535617683, 0.010204234403202185, 0.010004085489730424, 0.00010008172550436282]\n81 Tolerância: 2.5974307265405856\n=====================================================================\n82 Derivada: [-4.700261609223588, 2.900789729317218, -3.3364470858551103, -0.36423624898714035, -0.11151245407507898, -0.0021512837289408213, -4.340187502891585e-05, -7.559423794295528e-07, -1.3869197532892941e-08, -2.442065716312669e-10]\n82 Solução: [0.6959081010180075, 0.49898858145444647, 0.24416628118468955, 0.06842382500442634, 0.014269647107408082, 0.010297986706413801, 0.010209973184869936, 0.010204240494344057, 0.010004085598708903, 0.00010008172744526202]\n82 Tolerância: 6.464056552039166\n=====================================================================\n83 Derivada: [0.15966285743081698, -2.840478076203212, -0.8337653431234866, -0.8393116068269472, -0.08942297766899776, -0.002545294969945544, -4.38978781633903e-05, -8.063701376695431e-07, -1.4388880817200154e-08, -2.5639798964527394e-10]\n83 Solução: [0.7045145370700526, 0.49367707682312834, 0.2502754982608403, 0.06909076149549166, 0.014473832509156884, 0.01030192582456982, 0.010210052656076654, 0.010204241878515895, 0.010004085624104162, 0.00010008172789241761]\n83 Tolerância: 3.082437266121028\n=====================================================================\n84 Derivada: [-2.6869436755055176, 0.49894940905313945, -2.2919925083888115, -0.5651335806697195, -0.10905655123509611, -0.002608215198688528, -4.983858603892222e-05, -8.709009726021177e-07, -1.565674288421569e-08, -2.7716030859270546e-10]\n84 Solução: [0.7039883045077197, 0.5030390040762395, 0.25302350415248265, 0.07185704730900822, 0.014768561561532731, 0.01031031485828424, 0.010210197339024701, 0.010204244536229971, 0.010004085671528452, 0.00010008172873747934]\n84 Tolerância: 3.6129105423858823\n=====================================================================\n85 Derivada: [0.4411613256042699, -3.1456957209450636, -0.7050700680287338, -0.8816487975191276, -0.09576294232002777, -0.002951737577481406, -5.212892286816928e-05, -9.505330874626239e-07, -1.6661493860970194e-08, -2.9566988440921094e-10]\n85 Solução: [0.7108762216446356, 0.5017599589602585, 0.25889897322916294, 0.07330575399773674, 0.015048125474610981, 0.010317000956816034, 0.01021032509907192, 0.010204246768764203, 0.010004085711664145, 0.0001000817294479733]\n85 Tolerância: 3.372481659486499\n=====================================================================\n86 Derivada: [-2.3081612391323745, 0.076448068436477, -2.113723432103696, -0.6189376461699297, -0.11445884003574189, -0.0029434449285953568, -5.691183586043619e-05, -1.0137174821372996e-06, -1.7979165721590962e-08, -3.1499651151345454e-10]\n86 Solução: [0.7097453149261677, 0.5098238762136577, 0.2607064038234749, 0.07556584002653334, 0.015293611532804412, 0.010324567666914558, 0.010210458730343921, 0.010204249205433495, 0.010004085754375494, 0.00010008173020591612]\n86 Tolerância: 3.193348190135509\n=====================================================================\n87 Derivada: [0.8760393889982652, -3.618040365885804, -0.5005619991641934, -0.9529009690548509, 
-0.10225832666145873, -0.0033591747115342754, -5.9860860782537384e-05, -1.1181079138389283e-06, -1.9645969508055427e-08, -3.4448407321141516e-10]\n87 Solução: [0.7173527799477221, 0.509571911144348, 0.2676730215806526, 0.07760579564745473, 0.015670856244836277, 0.010334268962455583, 0.010210646305974808, 0.010204252546543361, 0.010004085813632997, 0.00010008173124411263]\n87 Tolerância: 3.876430281212263\n=====================================================================\n88 Derivada: [-1.6852233841937334, -0.6274031754951608, -1.8113593062440847, -0.7027359782242992, -0.11996484172537411, -0.003322192681995323, -6.359528963891664e-05, -1.1603054527048462e-06, -2.073563439822148e-08, -3.622804070624319e-10]\n88 Solução: [0.7157487039180621, 0.5161967409158674, 0.26858957797560656, 0.07935060943356591, 0.015858096833205645, 0.01034041979505727, 0.010210755914484542, 0.010204254593860098, 0.010004085849605842, 0.0001000817318748818]\n88 Tolerância: 2.650070866019025\n=====================================================================\n89 Derivada: [2.3707278044742566, -5.261088144135542, 0.21826662901733584, -1.1683437932538903, -0.11467246547650498, -0.004286752354254776, -7.257950705569921e-05, -1.3906313849065821e-06, -2.49439475759187e-08, -4.4318639036422525e-10]\n89 Solução: [0.7287087958619348, 0.5210217409227271, 0.2825197093590755, 0.08475495106297641, 0.0167806780134667, 0.010365968884286482, 0.010211244989587967, 0.010204263517107598, 0.01000408600907178, 0.0001000817346609738]\n89 Tolerância: 5.892812102867551\n=====================================================================\n90 Derivada: [-2.2741816998768343, 0.13907830042406033, -2.154788615164719, -0.6938197013492728, -0.14655221316088027, -0.004151000772595656, -7.882899287291883e-05, -1.4291532779708849e-06, -2.6110782090291162e-08, -4.6365625921973574e-10]\n90 Solução: [0.7243678636027656, 0.5306550810303972, 0.2821200512248885, 0.08689425244222548, 0.016990649568904635, 0.010373818162278892, 0.010211377886634577, 0.010204266063429715, 0.010004086054745513, 0.00010008173547247232]\n90 Tolerância: 3.2151578063810855\n=====================================================================\n91 Derivada: [1.0525284325497353, -3.7457529672048864, -0.43880803205721897, -1.0549267032369496, -0.13143808218167202, -0.00477197464631423, -8.42023721110835e-05, -1.557505902936851e-06, -2.795243847795786e-08, -5.023980780005655e-10]\n91 Solução: [0.7318633355139808, 0.5301966930773393, 0.28922201565475264, 0.08918101171180927, 0.017473670779273746, 0.010387499439239351, 0.010211637698989017, 0.01020427077377377, 0.010004086140804, 0.00010008173700063625]\n91 Tolerância: 4.057240808620091\n=====================================================================\n92 Derivada: [-1.7806290540156624, -0.4192328225400246, -1.9123661168324002, -0.7612555210845269, -0.1534472469516668, -0.004735891352348837, -9.000045898818754e-05, -1.6224645812135119e-06, -2.9253706250367134e-08, -5.23227839527296e-10]\n92 Solução: [0.7299360983938336, 0.5370553716061568, 0.29002549715876363, 0.0911126401967402, 0.017714341095768506, 0.010396237185784116, 0.01021179187813717, 0.010204273625652256, 0.010004086191986443, 0.00010008173792055461]\n92 Tolerância: 2.7580099011890002\n=====================================================================\n93 Derivada: [1.794313683886827, -4.562102907504972, -0.07247516492829931, -1.1754646522304006, -0.14244451518270038, -0.0057005424365692905, -0.00010044542757302322, -1.876834442873787e-06, 
-3.3169973953021525e-08, -5.914368612913723e-10]\n93 Solução: [0.7397173859219958, 0.5393582862807541, 0.30053043798609, 0.09529434166754144, 0.018557251998212964, 0.010422252213964743, 0.010212286265424093, 0.010204282538116385, 0.010004086352681851, 0.00010008174079473097]\n93 Tolerância: 5.043772128001608\n=====================================================================\n94 Derivada: [-2.1118798185428034, 0.02350489271556455, -2.1113486003938, -0.7588032787597501, -0.173783584634975, -0.005583783605798566, -0.00010757190320091758, -1.9475230440546554e-06, -3.4927927374146794e-08, -6.17889631820967e-10]\n94 Solução: [0.7364318994402694, 0.5477117461943984, 0.3006631439765593, 0.09744668172899848, 0.018818075695446912, 0.010432690218914516, 0.01021247018649509, 0.01020428597470289, 0.010004086413417887, 0.00010008174187768421]\n94 Tolerância: 3.0861620340958775\n=====================================================================\n95 Derivada: [0.972988321762557, -3.59513574433565, -0.49520165109275815, -1.1126875284759652, -0.15909972302960396, -0.006317520762600948, -0.00011424989417964382, -2.1246130093299787e-06, -3.787271433952893e-08, -6.712531121222387e-10]\n95 Solução: [0.7433924408343925, 0.5476342764552236, 0.30762193452961506, 0.09994762026983262, 0.019390848740508476, 0.010451093802576207, 0.010212824732562768, 0.010204292393541047, 0.010004086528536788, 0.00010008174391418568]\n95 Tolerância: 3.921779972378786\n=====================================================================\n96 Derivada: [-1.7717380245875631, -0.3496178966763921, -1.9471303892176834, -0.8156347343180752, -0.18371168541255128, -0.006304006015651009, -0.00012122045067675113, -2.20874932032461e-06, -3.9743019860554243e-08, -7.029342802278293e-10]\n96 Solução: [0.7416108460069464, 0.5542171666120882, 0.3085286758341062, 0.10198501198457133, 0.019682169034141783, 0.010462661528581945, 0.010213033930367052, 0.010204296283823657, 0.010004086597883799, 0.00010008174514328684]\n96 Tolerância: 2.784179378638387\n=====================================================================\n97 Derivada: [1.5062670515043806, -4.180997380701484, -0.22493108920330407, -1.2097213724752258, -0.17194361658487317, -0.007401333581805662, -0.00013275391080484755, -2.4838387909023596e-06, -4.4442055498585334e-08, -7.907708758636556e-10]\n97 Solução: [0.7500456339657983, 0.5558816072823005, 0.3177984616226181, 0.10586803867382974, 0.020556773005222045, 0.010492673275970909, 0.010213611029680577, 0.010204306799109728, 0.010004086787090069, 0.00010008174848977573]\n97 Tolerância: 4.614457576622279\n=====================================================================\n98 Derivada: [-2.0294490978486976, 0.0033921793535114375, -2.1051112097845657, -0.8160732778372446, -0.20440191674287675, -0.007327022786054564, -0.0001415486033424368, -2.5761975422922134e-06, -4.656727678855124e-08, -8.268182943860225e-10]\n98 Solução: [0.7472875766205144, 0.5635372421346592, 0.3182103227478683, 0.10808310466346943, 0.02087161117035548, 0.010506225522519626, 0.01021385410935124, 0.01020431134715439, 0.010004086868465904, 0.00010008174993772045]\n98 Tolerância: 3.0426925242084573\n=====================================================================\n99 Derivada: [1.0021538945091493, -3.5740021445105583, -0.4892745239391463, -1.1791643436554806, -0.1887236874951732, -0.008261473550881193, -0.00015079319972827188, -2.800652143038207e-06, -5.022019846295323e-08, -8.959714359324522e-10]\n99 Solução: [0.7539764347310997, 0.5635260618560282, 
0.325148555494961, 0.11077279930477871, 0.021545299128370336, 0.01053037464547171, 0.010214320639171826, 0.010204319838039844, 0.010004087021946918, 0.00010008175266282957]\n99 Tolerância: 3.9297972939178054\n=====================================================================\n100 Derivada: [-1.805500818591355, -0.23185769018714097, -1.9993115557349963, -0.8615039552788033, -0.21691312295738482, -0.008268721173519684, -0.0001598960923635609, -2.9131500402165833e-06, -5.2576010520970305e-08, -9.358309474571946e-10]\n100 Solução: [0.7521414361449623, 0.5700702552358693, 0.32604444390549414, 0.11293191370356194, 0.021890862521000656, 0.010545501855342707, 0.01021459674976703, 0.010204324966187079, 0.010004087113902847, 0.00010008175430340227]\n100 Tolerância: 2.8460756589601943\n=====================================================================\n"
],
[
"df",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
],
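[
"# Hedged sketch, added for illustration only (not part of the original run):\n# the same extended Rosenbrock objective and its gradient written numerically\n# with plain numpy, useful to sanity-check the symbolic setup below. The\n# notebook's own helpers (gradiente_simbolico, Parametros, steepestDescent)\n# are defined in earlier cells and are not redefined here.\nimport numpy as np\n\ndef rosen(x):\n    # f(x) = sum_i 100*(x[i] - x[i-1]**2)**2 + (1 - x[i-1])**2\n    x = np.asarray(x, dtype=float)\n    return np.sum(100.0*(x[1:] - x[:-1]**2)**2 + (1.0 - x[:-1])**2)\n\ndef rosen_grad(x):\n    # Analytic gradient of the extended Rosenbrock function\n    x = np.asarray(x, dtype=float)\n    g = np.zeros_like(x)\n    g[:-1] += -400.0*x[:-1]*(x[1:] - x[:-1]**2) - 2.0*(1.0 - x[:-1])\n    g[1:] += 200.0*(x[1:] - x[:-1]**2)\n    return g\n\n# Evaluate at the classical starting point (-1.2, 1, -1.2, 1, ...)\nx0 = np.array([-1.2 if i % 2 == 0 else 1.0 for i in range(50)])\nf0, g0 = rosen(x0), rosen_grad(x0)",
"_____no_output_____"
],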
[
"# Para n = 50\n\nimport numpy as np\nimport sympy as sym \n\nvariaveis = list(sym.symbols(\"x:50\"))\nc = variaveis\n\ndef f1(c):\n fo = 0\n for i in range(1,50):\n fo = fo + 100*(c[i] - c[i-1]**2)**2 + (1 - c[i-1])**2\n return fo\n\nx = []\nfor i in range(1,51):\n if (i%2 != 0):\n x.append(-1.2)\n else: \n x.append(1)\n\neps = 1e-3\nnmax = 100\nd1f = gradiente_simbolico(f1(c),c)\np = Parametros(f1,d1f,c,x,eps,nmax)\nm,df = steepestDescent(p)",
"1 Derivada: [-311.2609414050821, -135.48916022479534, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354645, 4.007166221737819, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, -121.52078589354642, 4.007166221737862, 64.00224724854343, 173.30654859542847]\n1 Solução: [-0.963134765625, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828124999999, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 0.1298828125, -0.479736328125, 1.0966796875]\n1 Tolerância: 699.563710954148\n=====================================================================\n2 Derivada: [-70.16386012806791, 238.32610122104512, -91.77547561404435, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436463, -3.0551036087249477, 21.099702048436473, -3.0551036087249663, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 21.099702048436466, -3.0551036087249646, 55.10116321277364, -196.48408487334314, -29.089245994059738]\n2 Solução: [0.2907240071249646, 0.6756765487571101, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936607, 0.11374066341340955, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 
0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, 0.009788322080936496, 0.11374066341340949, -0.7375578807619547, 0.39854539750376716]\n2 Tolerância: 350.190002148748\n=====================================================================\n3 Derivada: [-11.819553874572083, -0.35001771877826826, 19.37776602449332, 6.553742516850345, -0.4816069115053496, 12.83142213305249, -0.48160691150534973, 12.83142213305249, -0.48160691150533685, 12.8314221330525, -0.4816069115053512, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.4816069115053496, 12.83142213305249, -0.098533038763601, 2.490127467143401, -31.638030500031363, 61.817612131460436]\n3 Solução: [0.4191978721055577, 0.239288423962716, 0.17783423690157435, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276656, 0.015382383864490588, 0.0751059550727666, 0.015382383864490512, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.07510595507276654, 0.015382383864490508, 0.01284742022595775, -0.3777847761354719, 0.4518093977370308]\n3 Tolerância: 94.07066722971236\n=====================================================================\n4 Derivada: [0.9517953598751632, -1.9682670652409717, -5.4750758178398655, 3.37932359336928, 1.7188894502970726, -4.828494096814684, 1.852810876079327, -4.828494096814682, 1.8528108760793252, -4.828494096814686, 1.8528108760793298, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, 
-4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.828494096814684, 1.852810876079327, -4.843607282891036, 1.2359904677410851, -3.2388456100964533, -34.04866595369972, -0.6341285160658705]\n4 Solução: [0.5014384203050949, 0.24172384998448862, 0.043003589514352764, 0.02950496343940843, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498081, 0.018733408517298718, -0.014175180374498109, 0.018733408517298742, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.018733408517298728, -0.014175180374498095, 0.01606797751799701, -0.004478906144546872, -0.1576471127441404, 0.02168196957623436]\n4 Tolerância: 42.18137288536203\n=====================================================================\n5 Derivada: [-1.8365231959180974, -1.267009620743921, -0.896602716618977, -0.29955128429387023, 0.0584238953761243, -0.2768968711543185, -0.04772501394402841, -0.2742875873177894, -0.047725013944028366, -0.27428758731779096, -0.047725013944028276, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04772501394402841, -0.27428758731778946, -0.04801030004581676, -0.2609955682977828, -0.0028116905850604085, 0.17027034682157313, -1.1690077730219695, 4.936218233298063]\n5 Solução: [0.49690716798147044, 0.25109426203824814, 0.06906901394010408, 0.013416875043241204, 0.01055021899562077, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.00991265361408904, 0.008812035174302277, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 0.008812035174302284, 0.009912653614089042, 
0.008812035174302284, 0.009912653614089042, 0.008883985156843507, 0.010183745554874168, 0.01094040279023849, 0.004449807689732771, 0.02470089195496592]\n5 Tolerância: 5.766871981017148\n=====================================================================\n6 Derivada: [-1.0996215820891493, -2.1527862141460803, -0.6247839269679659, -0.08847564721061872, -0.03154090857851165, 0.06493515665952201, -0.00236297892134283, 0.061623044051352785, -0.0022945724083838076, 0.06162304405135366, -0.0022945724083838354, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06162304405135278, -0.0022945724083833566, 0.06161557059407559, -0.0018989404603309765, 0.05951255330486611, -0.001326840376213237, -0.07094159903677168, 0.3783758782395488, -1.2334989335923567]\n6 Solução: [0.5083406009638707, 0.2589821393158287, 0.07465089120421148, 0.015281757306301188, 0.0101864960141532, 0.010535880441498358, 0.010209769789765977, 0.010519636120738717, 0.010209769789765975, 0.01051963612073872, 0.010209769789765975, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010209769789765977, 0.010519636120738717, 0.010211545862909434, 0.010508835496588004, 0.010201249976241122, 0.009880370113492855, 0.011727566042286537, -0.0060299588626855916]\n6 Tolerância: 2.8271130464465664\n=====================================================================\n7 Derivada: [-3.8049140835313082, 1.2393865915806828, -2.0448142112885863, -0.11828614081827722, 0.04480733877086108, -0.10168999155523153, 0.010235675882663643, -0.0950357645793618, 0.009960002784515691, -0.09503217337119715, 0.009960002784515885, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009960002784514775, -0.0950321733711954, 0.009959610459419135, -0.09500038162832716, 
0.009239935983280996, -0.09171924931216088, 0.0014169504499976018, 0.1294468044873305, -0.6607487345879411, 1.9465930687520054]\n7 Solução: [0.5224348717303942, 0.2865752243360116, 0.08265898597125694, 0.016415783546183386, 0.010590767913658048, 0.009703581680603606, 0.010240056995178697, 0.009729789975060977, 0.01023918020271523, 0.009729789975060969, 0.01023918020271523, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729789975060977, 0.010239180202715227, 0.009729885765223831, 0.010235885309727641, 0.009746040318730224, 0.010218256597274128, 0.010789655745678039, 0.006877777563874352, 0.00978026916797816]\n7 Tolerância: 4.964433660705562\n=====================================================================\n8 Derivada: [-0.13648043120700493, -3.261209014388619, -0.21683070701487134, -0.2719805956031185, 0.01153912380677228, -0.03456460060864844, 0.0009459903150444773, -0.03273997300894337, 0.0009351615985625922, -0.03274244686025144, 0.0009352091472337974, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274244686025057, 0.0009352091472338009, -0.03274245205474924, 0.0009355020242367493, -0.03274053058477812, 0.0007309007773192641, -0.03171195853908125, 0.0010394226563873454, 0.0353731000819745, -0.2041495683944815, 0.6564986681169633]\n8 Solução: [0.5349754821131267, 0.2824903320053624, 0.08939848593522079, 0.016805642652884253, 0.010443087475814633, 0.010038741564879881, 0.01020632124703027, 0.010043018203044714, 0.01020635304510025, 0.010043006366787326, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635304510025, 0.01004300636678733, 0.01020635433816392, 0.010042997374594539, 0.010205431419157746, 0.010048337649226849, 0.010213586472499966, 0.010363012225028879, 0.009055538285782849, 0.0033644961142300957]\n8 Tolerância: 
3.3573411672305076\n=====================================================================\n9 Derivada: [-2.822612392164494, 0.019170448175813704, -1.5963346790069273, -0.08589924960170017, -0.005854092251544145, -0.00678078308618419, -0.000910165230386735, -0.006590153533382312, -0.0008827092627705854, -0.006590831040310836, -0.0008827395844172581, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827395844171818, -0.006590830258624121, -0.0008827396698132539, -0.006590826575518356, -0.0008826490371554724, -0.00659373900897367, -0.0009070172190786055, -0.006384442450000326, 0.00027564998826011544, 0.0037423523373417977, -0.028011287105867665, 0.12446718551316918]\n9 Solução: [0.5355252690064166, 0.2956275265213322, 0.09027194947666253, 0.017901267610758144, 0.01039660418899536, 0.01017797884760515, 0.010202510495028749, 0.010174905301347343, 0.01020258591463729, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903430555429, 0.010202585723096012, 0.010174903451480534, 0.010202585836357301, 0.010174886718991229, 0.01020248711671005, 0.010176083575958988, 0.010209399345099969, 0.010220518047452566, 0.009877918138934447, 0.0007199091943253361]\n9 Tolerância: 3.246601941008091\n=====================================================================\n10 Derivada: [-0.13080765423704577, -3.223466277616925, -0.28019504951561186, -0.25794634486714535, -0.0037989849587342223, -0.0014843359570648682, -0.0004037419288013405, -0.0013621180174747463, -0.0003950346440109928, -0.00136180410160483, -0.00039505187750536394, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.0013618044419113698, -0.000395051864656, -0.001361804443315101, -0.00039505182138385025, -0.0013618022113255288, -0.00039508130755582654, -0.0013627891776175419, -0.000396615317603044, -0.0013014963618035724, 1.2488122339025798e-05, 
0.00029870914650953484, -0.003171745910925273, 0.023740057145358727]\n10 Solução: [0.546895655840087, 0.2955503018155458, 0.09670249688965431, 0.01824729730275718, 0.010420186347723504, 0.010205294013845883, 0.010206176932114047, 0.010201452550688361, 0.01020614175029054, 0.010201453409111369, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141680894568, 0.010201453405962483, 0.010206141681238571, 0.01020145341205086, 0.010206141429402482, 0.010201448411776401, 0.010206140872597452, 0.010201802155164311, 0.01020828893865312, 0.01020544265351552, 0.009990756575762283, 0.00021851550268293094]\n10 Tolerância: 3.248618399535278\n=====================================================================\n11 Derivada: [-2.846088340865151, 0.06440997814655525, -1.6571100748586427, -0.09597450297544464, -0.008593284361150692, -0.00037064011812332903, -0.00012846206276853622, -0.00028858670499947586, -0.0001246766951416428, -0.00028838026686753976, -0.00012467502457978383, -0.000288380618885066, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502757125548, -0.0002883806186741236, -0.00012467502759434118, -0.0002883806182471041, -0.00012467498215961575, -0.00028838065057549406, -0.000124697138129376, -0.00028860594908114434, -0.00012401565024657002, -0.0002695125219515371, -1.396132124290711e-05, 8.356064122631846e-06, -0.00020283787002334106, 0.004562453618948074]\n11 Solução: [0.5474225909706806, 0.3085354564983288, 0.09783121229907998, 0.01928638780332063, 0.010435489875999656, 0.0102112733945323, 0.010207803333926844, 0.010206939598366176, 0.01020773307637115, 0.010206939192235509, 0.010207733076397215, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190457488, 0.010207733076345453, 0.010206939190463142, 0.010207733076515142, 0.01020693918756035, 0.0102077329434588, 0.010206938163102253, 0.010207738566137601, 0.01020704499939521, 0.010208238632496237, 
0.010204239357393105, 0.010003533384241352, 0.00012288294826437161]\n11 Tolerância: 3.295404254212489\n=====================================================================\n12 Derivada: [0.02972243112429851, -3.3673645455793313, -0.26231849292149056, -0.2897241628616292, -0.004756321839893149, -0.00022180177868868362, -3.6823695308121906e-05, -6.252282278763205e-05, -3.470250097090122e-05, -6.241876900708182e-05, -3.469877312516079e-05, -6.241881276376066e-05, -3.4698779516062483e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127701722e-05, -3.469877951295386e-05, -6.24188127705469e-05, -3.4698779510719535e-05, -6.24188119371788e-05, -3.4698770846566807e-05, -6.241918209994729e-05, -3.4706957476808575e-05, -6.245390028133263e-05, -3.425880905884304e-05, -5.677152573407146e-05, -7.11866221667079e-06, -1.877726865835705e-06, 3.595894487970453e-05, 0.0008833790293160289]\n12 Solução: [0.5588875464453571, 0.30827599247503334, 0.10450658247368146, 0.019673003843138706, 0.010470106377942767, 0.010212766451648765, 0.01020832082026368, 0.010208102118051843, 0.010208235314034685, 0.010208100880322256, 0.010208235307331191, 0.010208100879962274, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.01020823530729148, 0.010208100879961424, 0.010208235307291572, 0.010208100879965357, 0.010208235307278235, 0.010208100877192794, 0.010208235263473238, 0.010208100760309244, 0.010208238140900558, 0.01020813068216186, 0.01020829487317019, 0.010204205696490267, 0.010004350480153312, 0.00010450392367632394]\n12 Tolerância: 3.3901035445808128\n=====================================================================\n13 Derivada: [-3.032456396304184, 0.31242409818549177, -1.8019561418562762, -0.10283865938929862, -0.010422082647906804, -0.0001257759409076728, -1.21252460995383e-05, -1.3820176103525317e-05, -9.072838421754492e-06, -1.3764175786121213e-05, -9.070373573501905e-06, -1.3764123420474672e-05, -9.070375585770729e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585285007e-06, -1.3764123526854855e-05, -9.070375585291945e-06, -1.376412352675771e-05, -9.070375571094969e-06, -1.3764123215714852e-05, 
Iteration log (printed solver output, iterations 13-44). Each iteration k prints three items, separated by a line of "=" characters:

  k Derivada:   the 50-component gradient at the current iterate
  k Solução:    the updated 50-component solution vector
  k Tolerância: a scalar stopping measure that matches the Euclidean norm of the printed gradient

Over these iterations the leading solution components move from roughly (0.559, 0.322, 0.106, 0.021, ...) at iteration 13 to (0.686, 0.478, 0.229, 0.060, ...) at iteration 43, the remaining components settle near 0.0102084, and the trailing gradient entries decay to round-off level (about 1e-17), while the reported tolerance oscillates rather than decreasing monotonically:

  13: 3.5427575433336167   14: 3.054060206193067    15: 3.7694391883463205   16: 3.286880883888768
  17: 3.6053387261084353   18: 3.2522122365995134   19: 3.6456497760229      20: 3.3642279835189837
  21: 3.1706623938289074   22: 3.6505914662763947   23: 3.4921732991950822   24: 3.385798372227593
  25: 3.3208904284027394   26: 3.29012892782966     27: 3.290199249696503    28: 3.3204538509818775
  29: 3.3828491613817335   30: 3.4825576188132454   31: 3.6273254240994888   32: 2.945315338501526
  33: 4.260307365637199    34: 3.3436626691861533   35: 3.6048966898241197   36: 3.0317444847853676
  37: 3.9301300867042426   38: 3.281007626554935    39: 3.6485810177327433   40: 3.156183748342281
  41: 3.5294421267327922   42: 3.1240265890675514   43: 3.5332840172283015
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n42 Tolerância: 3.1240265890675514\n=====================================================================\n43 Derivada: [-2.68750550831993, 0.3801492088044327, -2.208338993825734, -0.4837524534610598, -0.07820732962849282, -0.0016531350749737841, -3.2434253369378885e-05, -5.788405091355919e-07, -1.0725769529484896e-08, -1.9529624445402405e-10, -3.58015006529655e-12, -6.51839693333045e-14, -1.1032841307212493e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n43 Solução: [0.6857708505199788, 0.4777879831744756, 0.22868264106813846, 0.060450732192021996, 0.013399842586805169, 0.010276190742218728, 0.010209662431811333, 0.010208446459912866, 0.01020842424687074, 0.010208423841886081, 0.010208423834501093, 0.010208423834366522, 0.010208423834364069, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n43 Tolerância: 3.5332840172283015\n=====================================================================\n44 Derivada: [0.2067278766625691, -2.972970881049797, -0.7677633342401604, -0.7635390668061639, -0.06880637591830542, -0.0018968291641175625, -3.3397102046174576e-05, -6.24641883550936e-07, -1.1341703388956237e-08, -2.0845981885120324e-10, -3.8073780239678e-12, -6.9409755720784e-14, -1.1171619185290638e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n44 Solução: [0.6926602079021466, 0.4768134795874525, 0.23434366632086556, 0.061690820268521296, 0.013600325243323523, 0.01028042851523807, 0.010209745576259668, 0.010208447943756945, 0.010208424274366, 0.010208423842386719, 0.010208423834510271, 0.010208423834366688, 0.010208423834364072, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n44 Tolerância: 3.171511096456366\n=====================================================================\n45 Derivada: [-2.7687485212525758, 0.5125766545429542, -2.276973379280369, -0.501223679120439, -0.08727875015786035, -0.0019031575382258276, -3.732248476957106e-05, -6.666061929880285e-07, -1.2348175947030082e-08, -2.250849298612856e-10, -4.128655750612609e-12, -7.521067102445045e-14, -1.5751289161869408e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n45 Solução: [0.6919788538164667, 0.4866120896690375, 0.2368741362945575, 0.06420736748577793, 0.013827104070202703, 0.01028668027151629, 0.010209855649716118, 0.010208450002513152, 0.010208424311747101, 0.010208423843073782, 0.01020842383452282, 0.010208423834366917, 0.010208423834364076, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n45 Tolerância: 3.6567960499342056\n=====================================================================\n46 Derivada: [0.3516394438973691, -3.1084118306072455, -0.7143698395914289, -0.8080268261321193, -0.07630928947761192, -0.0021848243246595234, -3.846077066729098e-05, -7.190502974291357e-07, -1.3059469394782752e-08, -2.4011349158969963e-10, -4.389603264209896e-12, -8.087280845003875e-14, -1.5959455978986625e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n46 Solução: [0.6990764757581542, 0.485298111428632, 0.24271110418577915, 0.0654922426397732, 0.01405084110063667, 0.010291558971260276, 0.010209951325031079, 0.010208451711342504, 0.01020842434340136, 0.010208423843650782, 0.010208423834533404, 0.01020842383436711, 0.010208423834364079, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n46 Tolerância: 3.3098238486705336\n=====================================================================\n47 Derivada: [-2.964319136517389, 0.7805322410096167, -2.403189935771712, -0.5073107683951112, -0.09770604516409537, -0.0021854544217781782, -4.302013743694849e-05, -7.675837455159429e-07, -1.4215728733057986e-08, -2.592039707871585e-10, -4.756937899141889e-12, -8.684025720739896e-14, -1.6167622796103842e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n47 Solução: [0.6979175078644495, 0.49554312112423693, 0.24506559462388566, 0.06815541699348013, 0.014302348768592666, 0.010298759930338134, 0.010210078087825026, 0.010208454081259257, 0.010208424386444044, 0.010208423844442172, 0.010208423834547871, 0.010208423834367377, 0.010208423834364084, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n47 Tolerância: 3.9292054991851386\n=====================================================================\n48 Derivada: [0.5994740586913281, -3.3638576509771383, -0.6063577412699477, -0.8631072733391593, -0.08390565908538203, -0.002521853000685248, -4.427313915567749e-05, -8.284432521724061e-07, -1.503626952809034e-08, -2.764673698307796e-10, -5.056014634741146e-12, -9.262035582935368e-14, -1.6306400674181987e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n48 Solução: [0.705516470494682, 0.49354224501813315, 0.25122611569947234, 0.06945589625816487, 0.014552815925385392, 0.010304362291526774, 0.010210188368939062, 0.010208456048942199, 0.010208424422885732, 0.010208423845106635, 0.010208423834560066, 0.010208423834367599, 0.010208423834364088, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n48 Tolerância: 3.576950913851089\n=====================================================================\n49 Derivada: [-2.4443382316725604, 0.19731328901531953, -2.1580493199530943, -0.5786799827234648, -0.10377578415779981, -0.002503347531139573, -4.8459681890919426e-05, -8.715377080442677e-07, -1.6077481602982946e-08, -2.9355823327748e-10, -5.384328399804517e-12, -9.846290449644357e-14, -2.0816681711721685e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n49 Solução: [0.7039797327954, 0.5021654152659993, 0.2527804995577083, 0.07166845152429309, 0.014767906115911883, 0.010310827002588101, 0.010210301862093636, 0.01020845817263706, 0.010208424461430856, 0.010208423845815353, 0.010208423834573026, 0.010208423834367836, 0.010208423834364091, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n49 Tolerância: 3.3191181053208325\n=====================================================================\n50 Derivada: [0.2129549811306788, -2.889024678989202, -0.8173417847176179, -0.8517242614408302, -0.09406950296912092, -0.0028185783411505266, -5.0342214706143584e-05, -9.366019479983834e-07, -1.7041374922310837e-08, -3.1287543389435513e-10, -5.72442093726977e-12, -1.0481199241851868e-13, -2.1024848528838902e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n50 Solução: [0.7102457365631154, 0.5016596072741388, 0.25831260841012715, 0.0731518840971926, 0.015033932906355463, 0.010317244275311969, 0.010210426087352389, 0.010208460406803547, 0.010208424502645103, 0.010208423846567883, 0.01020842383458683, 0.01020842383436809, 0.010208423834364096, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n50 Tolerância: 3.1295610313073103\n=====================================================================\n51 Derivada: [-2.7745528232005086, 0.629532041728794, -2.3559660453577482, -0.5720414211577921, -0.11640816846732327, -0.002863182676961333, -5.585027973879775e-05, -1.003150137235398e-06, -1.8527433247317315e-08, -3.380796961827137e-10, -6.2001306866399375e-12, -1.1330519855690113e-13, -2.1371793224034263e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n51 Solução: [0.709543858573549, 0.5111815391995183, 0.2610064839212814, 0.07595908075965628, 0.01534397643420779, 0.010326534023262538, 0.01021059201017918, 0.010208463493748444, 0.010208424558811744, 0.01020842384759909, 0.010208423834605697, 0.010208423834368435, 0.010208423834364103, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n51 Tolerância: 3.7397620803084797\n=====================================================================\n52 Derivada: [0.588158388919453, -3.292183300001767, -0.6414460276808835, -0.9215480554422024, -0.10230708926025286, -0.003259517607234308, -5.783522014311815e-05, -1.0788174998402345e-06, -1.962569181473972e-08, -3.6044846274396747e-10, -6.592792284321192e-12, -1.2069512056456233e-13, -2.157996004115148e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n52 Solução: [0.7166563597072416, 0.5095677485652037, 0.2670459476606018, 0.07742549553557347, 0.015642386045757326, 0.010333873724949084, 0.010210735181062299, 0.01020846606530031, 0.010208424606306384, 0.010208423848465749, 0.01020842383462159, 0.010208423834368725, 0.010208423834364109, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n52 Tolerância: 3.5292467134641856\n=====================================================================\n53 Derivada: [-2.4490049435198102, 0.2755864046532608, -2.2069606110297872, -0.6261691041653066, -0.12466825136000023, -0.0032600521906022, -6.30376519733869e-05, -1.1368727202282214e-06, -2.095868995022343e-08, -3.8296081583677477e-10, -7.020273251612252e-12, -1.2904260993096273e-13, -2.1788126858268697e-15, -3.469446951953614e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n53 Solução: [0.7151486294622088, 0.5180071832942121, 0.26869027951867047, 0.07978786237691701, 0.01590464787125358, 0.010342229421940284, 0.010210883440293623, 0.010208468830823686, 0.010208424656616385, 0.010208423849389749, 0.01020842383463849, 0.010208423834369033, 0.010208423834364114, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n53 Tolerância: 3.369256412070001\n=====================================================================\n54 Derivada: [0.35346951351982625, -2.9927632165339304, -0.774056279647878, -0.9244773363792752, -0.11313414446960394, -0.0036592294007893114, -6.563314595272457e-05, -1.2199485269204224e-06, -2.2222899319357392e-08, -4.080024720853892e-10, -7.466246371157226e-12, -1.365921264984138e-13, -2.643718577388654e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n54 Solução: [0.7214265962363685, 0.5173007240049399, 0.274347771319406, 0.08139303219960639, 0.0162242320117028, 0.01035058648932342, 0.010211045035837012, 0.010208471745170259, 0.010208424710343496, 0.01020842385037146, 0.010208423834656486, 0.010208423834369365, 0.010208423834364119, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n54 Tolerância: 3.247800265669299\n=====================================================================\n55 Derivada: [-2.2371470555507074, 0.05999898439267071, -2.1185039028754584, -0.6700053692124277, -0.13400003074945183, -0.003701143795728973, -7.118003267404804e-05, -1.2880485487296034e-06, -2.3715176621086353e-08, -4.3356890300216833e-10, -7.947992958223793e-12, -1.4555717742226193e-13, -3.1086244689504383e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n55 Solução: [0.7205204854229021, 0.5249726023676368, 0.2763320464503393, 0.0837629081839771, 0.01651424873946912, 0.010359966838129155, 0.010211213284868386, 0.010208474872479716, 0.010208424767311378, 0.010208423851417365, 0.010208423834675625, 0.010208423834369715, 0.010208423834364126, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n55 Tolerância: 3.15647945016816\n=====================================================================\n56 Derivada: [0.9274932900507338, -3.6250833570511602, -0.4946396061370244, -1.0161549790712137, -0.1215148501213881, -0.004242674606849392, -7.531692729529499e-05, -1.4058659442012233e-06, -2.5576642602442856e-08, -4.698421096627214e-10, -8.597463019288654e-12, -1.5727003033205733e-13, -2.706168622523819e-15, 
-3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n56 Solução: [0.7278938949077495, 0.5247748518087254, 0.2833144201536642, 0.08597117783348095, 0.01695589923144119, 0.01037216543218246, 0.010211447887026858, 0.010208479117756915, 0.010208424845474191, 0.010208423852846364, 0.010208423834701821, 0.010208423834370196, 0.010208423834364136, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n56 Tolerância: 3.9106900024000257\n=====================================================================\n57 Derivada: [-1.7247783887485468, -0.5137097409356315, -1.8689825291613644, -0.7466855969140582, -0.14214530938361877, -0.004223702485588211, -8.025844638773899e-05, -1.4595605362238695e-06, -2.681696056883176e-08, -4.907631384609701e-10, -8.993781414057267e-12, -1.6465301344581462e-13, -3.1710745140856034e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n57 Solução: [0.7261956039713773, 0.5314125776822322, 0.28422013232310456, 0.08783181317113575, 0.017178399567356818, 0.01037993400140887, 0.01021158579643963, 0.010208481691974343, 0.010208424892306422, 0.01020842385370667, 0.010208423834717564, 0.010208423834370484, 0.010208423834364141, 0.010208423834364024, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n57 Tolerância: 2.7036302258743707\n=====================================================================\n58 Derivada: [1.9676655228850564, -4.771287424434391, 0.015343954355511613, -1.1768998505707944, -0.13456053437128782, -0.005255534213301952, -9.065737227578058e-05, -1.7016741948941427e-06, -3.087037360460787e-08, -5.676414824296039e-10, -1.0384970661192483e-11, -1.905489654951964e-13, -3.247402347028583e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -6.938893903907228e-18, -2.220446049250313e-16, -3.469446951953614e-17, 1.0408340855860843e-16, -2.706168622523819e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n58 Solução: [0.7369333600536742, 0.5346107218213578, 0.29585564367408473, 0.0924803685230177, 0.018063337406537063, 0.010406229024207332, 0.010212085452294828, 0.010208490778593891, 0.010208425059257715, 0.01020842385676196, 0.010208423834773555, 0.010208423834371509, 0.01020842383436416, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363697, 0.010208423834348288, 0.010208423833601238, 0.010208423797385024, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n58 Tolerância: 5.295314358751226\n=====================================================================\n59 Derivada: [-2.1682703132904635, 0.07072628025811412, -2.130755743298712, -0.7405326263159905, -0.16643416343487094, -0.005142777782227541, -9.787033355105473e-05, -1.7629174306615925e-06, -3.240353087935466e-08, -5.926913126952371e-10, -1.086490619695013e-11, 
-1.9905604942138666e-13, -3.7192471324942744e-15, -3.469446951953614e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -6.938893903907228e-18, -2.220446049250313e-16, -3.469446951953614e-17, 1.0408340855860843e-16, -2.706168622523819e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n59 Solução: [0.7333304568745633, 0.5433472100252782, 0.2958275480545373, 0.09463533651112341, 0.018309725103750115, 0.010415852194763915, 0.010212251450901289, 0.010208493894452402, 0.010208425115783057, 0.010208423857801342, 0.010208423834792571, 0.010208423834371858, 0.010208423834364166, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363697, 0.010208423834348288, 0.010208423833601238, 0.010208423797385024, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n59 Tolerância: 3.134108127515398\n=====================================================================\n60 Derivada: [1.0187376315879533, -3.663470865338951, -0.46786335128754253, -1.1006438555461344, -0.1513957720955103, -0.005864940237374973, -0.000104688849982916, -1.930342718607647e-06, -3.498707078219976e-08, -6.420718262623026e-10, -1.1752234602147027e-11, -2.1546653350412726e-13, -3.760880495917718e-15, -4.787836793695988e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -6.938893903907228e-18, -2.220446049250313e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n60 Solução: [0.740476855612215, 0.5431141033886853, 0.3028503025795697, 0.09707605683711605, 0.018858275202961725, 0.010432802268020768, 0.010212574021580717, 0.010208499704849206, 0.010208425222581804, 0.010208423859754791, 0.010208423834828381, 0.010208423834372513, 
0.010208423834364178, 0.010208423834364024, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363697, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n60 Tolerância: 3.9889990277920715\n=====================================================================\n61 Derivada: [-1.785856974332944, -0.35379637865571567, -1.9446391082032548, -0.800394100806066, -0.1755863378635677, -0.005845403053177434, -0.00011157878501596302, -2.0136842809187483e-06, -3.673771629270073e-08, -6.709229699586317e-10, -1.2292965256843758e-11, -2.254862963013693e-13, -4.232725281383409e-15, -4.787836793695988e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -6.938893903907228e-18, -2.220446049250313e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n61 Solução: [0.7386114912965632, 0.5498221188891839, 0.3037069859620542, 0.09909139592808187, 0.01913548914112489, 0.010443541294334321, 0.010212765712590208, 0.01020850323941229, 0.010208425286645045, 0.01020842386093046, 0.0102084238348499, 0.010208423834372907, 0.010208423834364185, 0.010208423834364025, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363697, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n61 Tolerância: 2.7870367294525007\n=====================================================================\n62 Derivada: [1.4881604192082136, -4.173937663547775, -0.23323645558846096, -1.189425738920733, -0.1639988070259841, -0.006874367932965746, -0.00012282453354233586, -2.285901778330268e-06, -4.125958971007426e-08, 
-7.527748840940873e-10, -1.37495535790233e-11, -2.515904151678683e-13, -4.773959005888173e-15, -4.85722573273506e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n62 Solução: [0.7471134959351112, 0.5515064522348349, 0.3129649114039398, 0.10290186589041544, 0.019971410427340606, 0.010471369751252525, 0.010213296910419263, 0.010208512826043998, 0.01020842546154384, 0.010208423864124551, 0.010208423834908425, 0.01020842383437398, 0.010208423834364206, 0.010208423834364027, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n62 Tolerância: 4.597004760258003\n=====================================================================\n63 Derivada: [-2.0103568967042236, -0.041191789639569265, -2.0849807191870102, -0.8047453314185804, -0.19514680738201612, -0.006799359845237642, -0.00013105672842981392, -2.37607670430795e-06, -4.3395002265189664e-08, -7.883954392107206e-10, -1.4393791714084614e-11, -2.6277591214096674e-13, -4.8155923693116165e-15, -4.85722573273506e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n63 Solução: [0.744388592823768, 0.5591491603590069, 0.3133919801092409, 0.1050797694650994, 0.020271701211689943, 0.010483957094879781, 0.010213521808857146, 0.010208517011655165, 0.010208425537092405, 
[Condensed optimizer trace, iterations 63 through 94 of the run. Each iteration prints three records: the gradient ("Derivada"), the current 50-dimensional solution ("Solução"), and a tolerance ("Tolerância") that matches the Euclidean norm of the printed gradient (e.g. iteration 64: gradient components 0.925, -3.499, -0.528, -1.153, ... give norm ≈ 3.839, the reported tolerance). Only the first four or five gradient components are significant; the remaining entries decay rapidly to machine precision (≈ 1e-17). The leading solution components drift slowly (the first coordinate moves from ≈ 0.751 at iteration 64 to ≈ 0.810 by iteration 94) while components beyond index ~13 stay fixed at ≈ 0.010208423834364. The gradient direction alternates sign between consecutive iterations, and the tolerance oscillates between roughly 2.80 and 4.79 without converging within this window.]
0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n94 Tolerância: 3.6343931635843956\n=====================================================================\n95 Derivada: [-1.8144365804201072, 0.26447929985872065, -2.158409927499335, -1.102055834315819, -0.49409567920941067, -0.03958834900520612, -0.0008705347956155102, -1.600481581817692e-05, -2.9467910442337875e-07, -5.411842120273125e-09, -9.934926145649392e-11, -1.822854367450333e-12, -3.3431590829025026e-14, -5.828670879282072e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n95 Solução: [0.8086560819124284, 0.6583509803870911, 0.4331886285077796, 0.19329259946696772, 0.045338063200561066, 0.011979797391060669, 0.010243733742869337, 0.010209072569040849, 0.010208435727408446, 0.010208424052339302, 0.010208423838358249, 0.010208423834437203, 0.010208423834365364, 0.010208423834364048, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n95 Tolerância: 3.079137609327493\n=====================================================================\n96 Derivada: [0.8614910234192621, -3.0716680874306803, -0.5239314518063765, -1.5183217502050343, -0.46301504242362546, -0.042788589656642166, -0.0009171145471506548, -1.702759602969367e-05, -3.1267782158589386e-07, -5.750016754402232e-09, -1.0558004123750742e-10, -1.9376167337270545e-12, -3.554795346971673e-14, -5.828670879282072e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n96 Solução: [0.8133073475604781, 0.6576729939006368, 0.4387216617691915, 0.1961176937688027, 0.04660466589384691, 0.012081281195883585, 0.010245965338414738, 0.010209113597011085, 0.010208436482811424, 0.010208424066212432, 0.010208423838612929, 0.010208423834441876, 0.01020842383436545, 0.01020842383436405, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n96 Tolerância: 3.6018511019216386\n=====================================================================\n97 Derivada: [-1.8025673947972223, 0.2799121916613245, -2.1582133488683866, -1.1144082720305541, -0.5147311706052228, -0.04322613958182981, -0.0009645189997628742, -1.7738229122980786e-05, -3.266579386776436e-07, -6.003560822087639e-09, -1.1030092811337155e-10, -2.0247623022662253e-12, -3.7136960173711486e-14, -5.898059818321144e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n97 Solução: [0.811729910383807, 0.6632973861505709, 0.4396810089099502, 0.19889782392664884, 0.0474524717576597, 0.012159629443545893, 0.010247644625305273, 0.010209144775470613, 0.010208437055341616, 0.010208424076741027, 0.010208423838806251, 0.010208423834445424, 0.010208423834365515, 0.010208423834364051, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n97 Tolerância: 3.0812673223168963\n=====================================================================\n98 Derivada: [0.8894599762254529, -3.0881767461206806, -0.4996048964624151, -1.539619700542639, -0.4820195692954075, -0.04668284641696146, -0.0010164565388284252, -1.887262157038161e-05, -3.465752471745742e-07, -6.376192962509464e-09, -1.171682681433417e-10, -2.1518412052223823e-12, -3.8864744755784386e-14, -5.967448757360216e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n98 Solução: [0.8163507496526964, 0.662579837807689, 0.44521353824664894, 0.2017545834130553, 0.04877197304949438, 0.012270438639251266, 0.010250117147155251, 0.01020919024700523, 0.010208437892721585, 0.010208424092131015, 0.010208423839089004, 0.010208423834450614, 0.01020842383436561, 0.010208423834364053, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n98 Tolerância: 3.6307756370132864\n=====================================================================\n99 Derivada: [-1.8247886098360766, 0.3382501650822718, -2.178727026917265, -1.1208213787214514, -0.5362963362369204, -0.047134571045050526, -0.0010690106561928384, -1.966317408576279e-05, -3.6206504450825294e-07, -6.656204212807104e-09, -1.2237023200301067e-10, -2.2481044803512873e-12, -4.13766243489988e-14, -1.6653345369377348e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 
-2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n99 Solução: [0.8147220997938851, 0.6682344583145018, 0.4461283421342144, 0.20457371128270124, 0.04965457724131946, 0.012355917484008886, 0.010251978334665313, 0.010209224803807423, 0.010208438527319817, 0.010208424103806173, 0.010208423839303546, 0.010208423834454554, 0.010208423834365682, 0.010208423834364055, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n99 Tolerância: 3.120447922463018\n=====================================================================\n100 Derivada: [0.9658469479623761, -3.1659448647319834, -0.44438753872358205, -1.5681604518190202, -0.5004334100567776, -0.0508977707369905, -0.001126603091795103, -2.0923701385199e-05, -3.8416660207152287e-07, -7.068260733655851e-09, -1.2994388204345952e-10, -2.387694209016189e-12, -4.400646513857964e-14, -6.106226635438361e-16, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, -2.7755575615628914e-17, 0.0, -1.3877787807814457e-17, 2.1510571102112408e-16, -3.469446951953614e-17, 9.020562075079397e-17, 1.734723475976807e-16, 1.0408340855860843e-16, 1.3877787807814457e-16, 1.1102230246251565e-16, 0.0, 0.0]\n100 Solução: [0.8193999026267168, 0.6673673619440517, 0.45171345780380207, 0.20744691061780263, 0.051029360329817426, 0.012476745852166364, 0.010254718718427525, 0.010209275209893336, 0.010208439455465072, 0.010208424120869196, 0.01020842383961724, 0.010208423834460316, 0.010208423834365788, 0.010208423834364055, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 
0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364022, 0.010208423834364015, 0.010208423834363699, 0.010208423834348288, 0.010208423833601238, 0.010208423797385026, 0.010208422041661194, 0.010208336926003357, 0.01020421057271055, 0.010004085062784374, 0.00010008171794342543]\n100 Tolerância: 3.7236675672343007\n=====================================================================\n"
],
[
"df",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
]
],
[
[
"**2.** Resolva:\n\nMinimizar $(x_1 - x^3_2)^2 + 3(x_1 - x_2)^4$",
"_____no_output_____"
]
],
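[
[
"*(Added note, not part of the original exercise.)* The objective $f(x_1, x_2) = (x_1 - x_2^3)^2 + 3(x_1 - x_2)^4$ has partial derivatives $2(x_1 - x_2^3) + 12(x_1 - x_2)^3$ with respect to $x_1$ and $-6x_2^2(x_1 - x_2^3) - 12(x_1 - x_2)^3$ with respect to $x_2$; this is the gradient that `gradiente_simbolico` builds symbolically in the next cell. Since $f$ is non-negative, every point with $x_1 = x_2$ and $x_1 = x_2^3$ is a global minimizer, namely $(0,0)$, $(1,1)$ and $(-1,-1)$; from the starting point $[1.2, 1.5]$ the iterates below appear to head towards $(1, 1)$.",
"_____no_output_____"
]
],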
[
[
"import numpy as np\nimport sympy as sym \n\nx1 = sym.Symbol('x1')\nx2 = sym.Symbol('x2')\nvariaveis = [x1,x2]\n\nc = variaveis\n\nfo = 0\ndef fo(c):\n return (c[0] - c[1]**3)**2 + 3*(c[0] - c[1])**4\n\nx = [1.2,1.5]\n\neps = 1e-3\nnmax = 500\nd1f = gradiente_simbolico(fo(c),c)\np = Parametros(fo,d1f,c,x,eps,nmax)\nm,df = steepestDescent(p)",
"1 Derivada: [0.12518138031716364, -0.22882191410531721]\n1 Solução: [1.266755126953125, 1.0760106811523438]\n1 Tolerância: 0.2608253176992823\n=====================================================================\n2 Derivada: [0.04232431067070003, 0.021487096351164928]\n2 Solução: [1.2622166906794192, 1.0843065928331639]\n2 Tolerância: 0.04746622571212269\n=====================================================================\n3 Derivada: [0.08106090606500524, -0.15969019390560496]\n3 Solução: [1.224010113847165, 1.0649099769253434]\n3 Tolerância: 0.1790860924854002\n=====================================================================\n4 Derivada: [0.02485029017216174, 0.01304697856786572]\n4 Solução: [1.2208931478546752, 1.0710504079654148]\n4 Tolerância: 0.02806707272572232\n=====================================================================\n5 Derivada: [0.052321832665950384, -0.09966363537189901]\n5 Solução: [1.1983149362151, 1.0591963233120183]\n5 Tolerância: 0.11256293523654469\n=====================================================================\n6 Derivada: [0.017219445895577862, 0.00918986481213635]\n6 Solução: [1.1962264060293375, 1.0631745958665362]\n6 Tolerância: 0.019518271752798074\n=====================================================================\n7 Derivada: [0.03800867686194444, -0.07120475574132842]\n7 Solução: [1.1803543313274931, 1.0547038110525175]\n7 Tolerância: 0.08071416701532609\n=====================================================================\n8 Derivada: [0.013111799428512795, 0.006341140088083265]\n8 Solução: [1.1788093008837641, 1.057598242651866]\n8 Tolerância: 0.014564660719366075\n=====================================================================\n9 Derivada: [0.031132496315857974, -0.06437649089971201]\n9 Solução: [1.1601547571411717, 1.0485765156344575]\n9 Tolerância: 0.07150919456557746\n=====================================================================\n10 Derivada: [0.009227052816909623, 0.004046561704485276]\n10 Solução: [1.158866437286304, 1.0512405330191015]\n10 Tolerância: 0.010075374202194506\n=====================================================================\n11 Derivada: [0.024020232234937655, -0.05477313729330487]\n11 Solução: [1.1368857322421155, 1.0416008050328918]\n11 Tolerância: 0.059808595749871636\n=====================================================================\n12 Derivada: [0.0055855628751964505, 0.002881776426367229]\n12 Solução: [1.1358565450455258, 1.0439476521019775]\n12 Tolerância: 0.006285152981776873\n=====================================================================\n13 Derivada: [0.01426782788787831, -0.027657298792837185]\n13 Solução: [1.1287402706646146, 1.0402761310372808]\n13 Tolerância: 0.031120685872171764\n=====================================================================\n14 Derivada: [0.004685073956554129, 0.002344147124997902]\n14 Solução: [1.1281184915440126, 1.0414814112703183]\n14 Tolerância: 0.005238792200690698\n=====================================================================\n15 Derivada: [0.012481898229370517, -0.024947451553335536]\n15 Solução: [1.1211303422916767, 1.0379849349860961]\n15 Tolerância: 0.02789575455897163\n=====================================================================\n16 Derivada: [0.003960501065677126, 0.0017244242198304104]\n16 Solução: [1.1205863923810462, 1.0390721225110644]\n16 Tolerância: 0.004319630491276698\n=====================================================================\n17 Derivada: [0.012066318540466147, -0.027608746632263797]\n17 
Solução: [1.108706339562823, 1.0338994813545834]\n17 Tolerância: 0.03013036564206506\n=====================================================================\n18 Derivada: [0.002795343796046878, 0.001372732643269186]\n18 Solução: [1.1081716626060127, 1.035122866978059]\n18 Tolerância: 0.00311421608884075\n=====================================================================\n19 Derivada: [0.007968786644618812, -0.016226411903511845]\n19 Solução: [1.1032692328797182, 1.032715390185353]\n19 Tolerância: 0.01807755525091504\n=====================================================================\n20 Derivada: [0.0023782384659740965, 0.001245044164222964]\n20 Solução: [1.102910287289891, 1.0334462917957528]\n20 Tolerância: 0.0026844279040243346\n=====================================================================\n21 Derivada: [0.006431130138998515, -0.012283163466577814]\n21 Solução: [1.0997148196635511, 1.0317734156830083]\n21 Tolerância: 0.013864903159106701\n=====================================================================\n22 Derivada: [0.0021083493967214537, 0.0012192728456990568]\n22 Solução: [1.0994204258888778, 1.0323356942840785]\n22 Tolerância: 0.0024355211867103095\n=====================================================================\n23 Derivada: [0.0052363868108317784, -0.009054759420506504]\n23 Solução: [1.0974971232714827, 1.031223435253816]\n23 Tolerância: 0.010459847752042296\n=====================================================================\n24 Derivada: [0.001957160981135788, 0.0011826850339765175]\n24 Solução: [1.0972535852618481, 1.031644561149716]\n24 Tolerância: 0.002286749438760713\n=====================================================================\n25 Derivada: [0.004703306340188537, -0.0077812286863363994]\n25 Solução: [1.095723358689073, 1.0307198666132635]\n25 Tolerância: 0.009092228021707451\n=====================================================================\n26 Derivada: [0.0018706369382640508, 0.0010584498261143752]\n26 Solução: [1.0955046136066007, 1.0310817621368835]\n26 Tolerância: 0.0021493251473891643\n=====================================================================\n27 Derivada: [0.004763007909022166, -0.008418588432519976]\n27 Solução: [1.0936282679374458, 1.0300200821782433]\n27 Tolerância: 0.009672583705379182\n=====================================================================\n28 Derivada: [0.0017381834461067456, 0.0010189046689387834]\n28 Solução: [1.093406746207302, 1.0304116205318554]\n28 Tolerância: 0.002014807290215313\n=====================================================================\n29 Derivada: [0.0043050183829753865, -0.007345607622727398]\n29 Solução: [1.0918720439531895, 1.0295119943757967]\n29 Tolerância: 0.008514172574292047\n=====================================================================\n30 Derivada: [0.0016560692238623952, 0.0009157905283991097]\n30 Solução: [1.0916718227613056, 1.029853629691258]\n30 Tolerância: 0.001892415801595864\n=====================================================================\n31 Derivada: [0.004333915813151718, -0.007838135891388792]\n31 Solução: [1.0898311842989927, 1.0288357741296035]\n31 Tolerância: 0.008956517209683889\n=====================================================================\n32 Derivada: [0.0015396302549809744, 0.000874212781307187]\n32 Solução: [1.0896296191226242, 1.0292003163384194]\n32 Tolerância: 0.0017705110304806428\n=====================================================================\n33 Derivada: [0.003942946924824818, -0.006945237513631915]\n33 
Solução: [1.0880785243357154, 1.0283195939160403]\n33 Tolerância: 0.007986435661341391\n=====================================================================\n34 Derivada: [0.0014615736327720443, 0.0007883150425160444]\n34 Solução: [1.087895142648904, 1.0286426085025508]\n34 Tolerância: 0.0016606137691442713\n=====================================================================\n35 Derivada: [0.003946618271342349, -0.007318033423091624]\n35 Solução: [1.0860726359766915, 1.0276596204459956]\n35 Tolerância: 0.00831440971814472\n=====================================================================\n36 Derivada: [0.0013583727204875906, 0.0007459304091451246]\n36 Solução: [1.0858890835399995, 1.0279999733188225]\n36 Tolerância: 0.0015497059795497566\n=====================================================================\n37 Derivada: [0.0036136548216465854, -0.006581275185793651]\n37 Solução: [1.0843066920761795, 1.02713102631022]\n37 Tolerância: 0.00750810790020714\n=====================================================================\n38 Derivada: [0.0012840759580162804, 0.0006737231570145704]\n38 Solução: [1.0841386253663348, 1.0274371134495983]\n38 Tolerância: 0.0014500875691671549\n=====================================================================\n39 Derivada: [0.0035967274774786192, -0.006855729706912274]\n39 Solução: [1.0823117326306562, 1.026478587644544]\n39 Tolerância: 0.007741929886112968\n=====================================================================\n40 Derivada: [0.0011916959753915666, 0.000631299833880244]\n40 Solução: [1.0821444531910909, 1.0267974393313521]\n40 Tolerância: 0.0013485839899767759\n=====================================================================\n41 Derivada: [0.003313989418719195, -0.006255087121631857]\n41 Solução: [1.0805083442268295, 1.0259307121302228]\n41 Tolerância: 0.007078745705743885\n=====================================================================\n42 Derivada: [0.0011206763132576814, 0.00056979163914969]\n42 Solução: [1.0803542145919989, 1.0262216286577304]\n42 Tolerância: 0.0012572103686900294\n=====================================================================\n43 Derivada: [0.0032820647607224116, -0.0064556342657388485]\n43 Solução: [1.0784889287605453, 1.0252732510942775]\n43 Tolerância: 0.007242041346647879\n=====================================================================\n44 Derivada: [0.001036770186661903, 0.0005277792807279684]\n44 Solução: [1.0783362839028994, 1.0255734948266073]\n44 Tolerância: 0.0011633758589195018\n=====================================================================\n45 Derivada: [0.0030444693736104843, -0.005980323240658322]\n45 Solução: [1.076607616424919, 1.0246934976103643]\n45 Tolerância: 0.006710667614299659\n=====================================================================\n46 Derivada: [0.000968147915376289, 0.0004743664990639381]\n46 Solução: [1.0764660218410145, 1.0249716352024896]\n46 Tolerância: 0.0010781159313736307\n=====================================================================\n47 Derivada: [0.003000439757167411, -0.006123410184309283]\n47 Solução: [1.0745078623183253, 1.0240121895820073]\n47 Tolerância: 0.00681900220132634\n=====================================================================\n48 Derivada: [0.0008904212091206175, 0.00043248851025529393]\n48 Solução: [1.0743683154985644, 1.0242969819746126]\n48 Tolerância: 0.0009898970861431334\n=====================================================================\n"
],
[
"df",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
]
],
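[
[
"*(Added sanity check, not part of the original notebook.)* A minimal sketch that cross-checks the steepest-descent result above with `scipy.optimize.minimize` (BFGS), assuming SciPy is available in this environment; `fo2` is a hypothetical name that simply re-defines the same objective so the cell is self-contained, and the starting point $[1.2, 1.5]$ matches the one used above.",
"_____no_output_____"
]
],
[
[
"# Added sanity check (not part of the original solution): minimize the same\n# objective with SciPy's BFGS and compare against the steepest-descent result.\nfrom scipy.optimize import minimize\n\ndef fo2(c):\n    return (c[0] - c[1]**3)**2 + 3*(c[0] - c[1])**4\n\nres = minimize(fo2, [1.2, 1.5], method='BFGS')\nprint('x* =', res.x, ' f(x*) =', res.fun)",
"_____no_output_____"
]
],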
[
[
"**3.** Resolva:\n\n\n\n$2(x_1 - 2)^4 + (2x_1 - x_2)^2 = 4$\n",
"_____no_output_____"
]
],
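[
[
"*(Added note, not part of the original exercise.)* The cell below recasts the equation as an unconstrained minimization of the squared residual $g(x_1, x_2) = (2(x_1 - 2)^4 + (2x_1 - x_2)^2 - 4)^2$: any point where $g$ reaches zero satisfies the original equation. With a single equation in two unknowns there is a whole curve of solutions, and steepest descent simply returns the one reached from the starting point $[1.2, 0.5]$.",
"_____no_output_____"
]
],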
[
[
"import numpy as np\nimport sympy as sym \n\nx1 = sym.Symbol('x1')\nx2 = sym.Symbol('x2')\nvariaveis = [x1,x2]\n\nc = variaveis\n\nfo = 0\ndef fo(c):\n return (2*(c[0] - 2)**4 + (2*c[0] - c[1])**2 - 4)**2\n\nx = [1.2,0.5]\n\neps = 1e-3\nnmax = 300\nd1f = gradiente_simbolico(fo(c),c)\np = Parametros(fo,d1f,c,x,eps,nmax)\nm,df = steepestDescent(p)",
"1 Derivada: [0.060123472137406354, 0.01411795214220896]\n1 Solução: [0.8772600421875003, 0.8500033789062498]\n1 Tolerância: 0.06175879269016826\n=====================================================================\n2 Derivada: [0.002155724219767421, 0.0005046112987580334]\n2 Solução: [0.8767976668524592, 0.8498948057879686]\n2 Tolerância: 0.0022139962679566844\n=====================================================================\n3 Derivada: [7.057252525030156e-05, 1.651773667993772e-05]\n3 Solução: [0.8767810884069215, 0.849890925110256]\n3 Tolerância: 7.247976921343094e-05\n=====================================================================\n"
],
[
"df",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
]
],
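[
[
"*(Added sanity check, not part of the original notebook.)* The same squared residual minimized with `scipy.optimize.minimize` (BFGS), assuming SciPy is available; `residual_sq` is a hypothetical name that re-defines the objective so the cell is self-contained. Because the equation has infinitely many solutions, BFGS may stop at a different zero of the residual than the steepest-descent run above.",
"_____no_output_____"
]
],
[
[
"# Added sanity check (not part of the original solution): minimize the same\n# squared residual with SciPy's BFGS from the same starting point.\nfrom scipy.optimize import minimize\n\ndef residual_sq(c):\n    return (2*(c[0] - 2)**4 + (2*c[0] - c[1])**2 - 4)**2\n\nres = minimize(residual_sq, [1.2, 0.5], method='BFGS')\nprint('x* =', res.x, ' residual^2 =', res.fun)",
"_____no_output_____"
]
],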
[
[
"**4** Resolva:\n\nMinimizar $(x_1 - 3)^4 + (x_1 - 3x_2)^2$",
"_____no_output_____"
]
],
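[
[
"*(Added note, not part of the original exercise.)* Here $f(x_1, x_2) = (x_1 - 3)^4 + (x_1 - 3x_2)^2$ has partial derivatives $4(x_1 - 3)^3 + 2(x_1 - 3x_2)$ and $-6(x_1 - 3x_2)$, which vanish only at $(3, 1)$, the unique global minimizer. Along the line $x_1 = 3x_2$ through that point the quadratic term is identically zero and $f$ grows only quartically, so the Hessian at the minimizer is singular; this is why the steepest-descent run below needs so many iterations to tighten the tolerance.",
"_____no_output_____"
]
],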
[
[
"import numpy as np\nimport sympy as sym \n\nx1 = sym.Symbol('x1')\nx2 = sym.Symbol('x2')\nvariaveis = [x1,x2]\n\nc = variaveis\n\ndef f4(c):\n return (c[0] - 3)**4 + (c[0] - 3*c[1])**2\n\nx = [1.5,1]\n\neps = 1e-4\nnmax = 300\nd1f = gradiente_simbolico(f4(c),c)\np = Parametros(f4,d1f,c,x,eps,nmax)\nn,t = steepestDescent(p)",
"1 Derivada: [-1.2189947589327108, -2.2427215576171875]\n1 Solução: [2.2107467651367188, 0.6123199462890625]\n1 Tolerância: 2.552596365919665\n=====================================================================\n2 Derivada: [-1.5108617082256197, 0.8213843286490192]\n2 Solução: [2.323744097297916, 0.8202138284686953]\n2 Tolerância: 1.7197021011595695\n=====================================================================\n3 Derivada: [-0.46010848025836104, -0.8479326027019987]\n3 Solução: [2.429492428835253, 0.7627234427949732]\n3 Tolerância: 0.9647225053509657\n=====================================================================\n4 Derivada: [-0.7074229085276382, 0.38423439271131876]\n4 Solução: [2.474839118807005, 0.8462927280862972]\n4 Tolerância: 0.8050361731325736\n=====================================================================\n5 Derivada: [-0.25850749496767733, -0.4756084367156568]\n5 Solução: [2.5293401299330434, 0.8166906857157001]\n5 Tolerância: 0.5413220021665244\n=====================================================================\n6 Derivada: [-0.4290885798250921, 0.23323331388186297]\n6 Solução: [2.5554804065619314, 0.8647842085140806]\n6 Tolerância: 0.4883797580168839\n=====================================================================\n7 Derivada: [-0.17072691690908037, -0.31397144880007843]\n7 Solução: [2.5901487419832856, 0.8459400557277575]\n7 Tolerância: 0.35738739599893604\n=====================================================================\n8 Derivada: [-0.29502238372225076, 0.16027775443711256]\n8 Solução: [2.607647104728621, 0.8781200212671577]\n8 Tolerância: 0.33574866412922977\n=====================================================================\n9 Derivada: [-0.12340367025766241, -0.22691607478469855]\n9 Solução: [2.632185785967482, 0.8647888133900108]\n9 Tolerância: 0.2583009307547241\n=====================================================================\n10 Derivada: [-0.21866593309786886, 0.11888995943571778]\n10 Solução: [2.64494681308748, 0.8882539354422554]\n10 Tolerância: 0.2488967913657954\n=====================================================================\n11 Derivada: [-0.0944434783756174, -0.17392239794958542]\n11 Solução: [2.663494846566653, 0.8781692600805739]\n11 Tolerância: 0.1979105128996933\n=====================================================================\n12 Derivada: [-0.17026811765594108, 0.09238551201370271]\n12 Solução: [2.6733043673995205, 0.8962339842450459]\n12 Tolerância: 0.19371709971019957\n=====================================================================\n13 Derivada: [-0.07526963812538501, -0.13876841750403912]\n13 Solução: [2.687965362698783, 0.8882790977049255]\n13 Tolerância: 0.15786764114314789\n=====================================================================\n14 Derivada: [-0.13742571363053102, 0.07451414004073698]\n14 Solução: [2.6958109269388117, 0.9027433167596449]\n14 Tolerância: 0.15632716920891032\n=====================================================================\n15 Derivada: [-0.06183855923778925, -0.11395643492189578]\n15 Solução: [2.707769832652235, 0.8962590311661952]\n15 Tolerância: 0.12965367896328217\n=====================================================================\n16 Derivada: [-0.11393751965925514, 0.06178917914613535]\n16 Solução: [2.7142380840495592, 0.9081787601913051]\n16 Tolerância: 0.12961350641679423\n=====================================================================\n17 Derivada: [-0.051948705332584666, -0.09580085229020341]\n17 Solução: [2.724236476926152, 
0.9027565560703726]\n17 Tolerância: 0.1089792240991882\n=====================================================================\n18 Derivada: [-0.0964604934054476, 0.052356586823034235]\n18 Solução: [2.729684527642565, 0.912803541815468]\n18 Tolerância: 0.10975353739985022\n=====================================================================\n19 Derivada: [-0.04444987700348779, -0.0819677154693288]\n19 Solução: [2.738202241194465, 0.9081803184276369]\n19 Tolerância: 0.09324429175497037\n=====================================================================\n20 Derivada: [-0.08300535752296145, 0.045016454438815856]\n20 Solução: [2.742872001374656, 0.9167915812603751]\n20 Tolerância: 0.09442653518877327\n=====================================================================\n21 Derivada: [-0.038616727867634815, -0.07111125959220743]\n21 Solução: [2.750239588116463, 0.912795903839254]\n21 Tolerância: 0.08092010202658717\n=====================================================================\n22 Derivada: [-0.0724276148827796, 0.03930791177236159]\n22 Solução: [2.7543071429332096, 0.9202861538539788]\n22 Tolerância: 0.08240674320413349\n=====================================================================\n23 Derivada: [-0.03394262755976207, -0.06248428517442406]\n23 Solução: [2.760762369046038, 0.9167827738389891]\n23 Tolerância: 0.07110828263584679\n=====================================================================\n24 Derivada: [-0.06391459588318948, 0.034701068802107216]\n24 Solução: [2.764343809316664, 0.9233757735945607]\n24 Tolerância: 0.07272715959612339\n=====================================================================\n25 Derivada: [-0.03012182756176074, -0.055517230492444725]\n25 Solução: [2.7700637053258026, 0.9202702778590205]\n25 Tolerância: 0.06316238894478013\n=====================================================================\n26 Derivada: [-0.056923775361696904, 0.030854764347903796]\n26 Solução: [2.773241995694296, 0.9261281521396488]\n26 Tolerância: 0.0647482253377929\n=====================================================================\n27 Derivada: [-0.026972556146465543, -0.04974233356830737]\n27 Solução: [2.7783571097248294, 0.9233555735989261]\n27 Tolerância: 0.056584613932543486\n=====================================================================\n28 Derivada: [-0.05113647890771045, 0.027722541687040803]\n28 Solução: [2.7812055756228045, 0.928608666412437]\n28 Tolerância: 0.05816767824718837\n=====================================================================\n29 Derivada: [-0.024333503961449843, -0.0449093866098238]\n29 Solução: [2.785814693983825, 0.9261099320718402]\n29 Tolerância: 0.05107810118546417\n=====================================================================\n30 Derivada: [-0.04625243577921978, 0.025040693950103332]\n30 Solução: [2.788384459927996, 0.9308526363065599]\n30 Tolerância: 0.05259585695673749\n=====================================================================\n31 Derivada: [-0.022102928401114497, -0.040798907211542]\n31 Solução: [2.792566065182999, 0.9285887491048027]\n31 Tolerância: 0.04640140378868734\n=====================================================================\n32 Derivada: [-0.042111794573864714, 0.022805991384206692]\n32 Solução: [2.794902292371636, 0.9329010969785568]\n32 Tolerância: 0.04789067221545226\n=====================================================================\n33 Derivada: [-0.0201955699797276, -0.037272307895193535]\n33 Solução: [2.7987172600899135, 0.9308350695913493]\n33 Tolerância: 
0.04239205093694077\n=====================================================================\n34 Derivada: [-0.0385529489131633, 0.020882981891428898]\n34 Solução: [2.8008537329700016, 0.9347780766506355]\n34 Tolerância: 0.043845510631976066\n=====================================================================\n35 Derivada: [-0.018543425707026273, -0.03423634833019307]\n35 Solução: [2.804353359024146, 0.9328824336563711]\n35 Tolerância: 0.03893566724660458\n=====================================================================\n36 Derivada: [-0.03545898732865549, 0.019186525066977822]\n36 Solução: [2.8063150529088325, 0.9365042690288874]\n36 Tolerância: 0.04031702526873131\n=====================================================================\n37 Derivada: [-0.017091180964174946, -0.0316290659926608]\n37 Solução: [2.809543565137075, 0.9347573513794328]\n37 Tolerância: 0.03595144339686889\n=====================================================================\n38 Derivada: [-0.03275798630275073, 0.017708062595108487]\n38 Solução: [2.8113500624902676, 0.9381004687520397]\n38 Tolerância: 0.037237899343054846\n=====================================================================\n39 Derivada: [-0.015831320064989995, -0.029303306930582806]\n39 Solução: [2.814338648917638, 0.9364849214764026]\n39 Tolerância: 0.03330637314491173\n=====================================================================\n40 Derivada: [-0.03038245421208785, 0.016407917486617407]\n40 Solução: [2.8160119819573444, 0.9395822116239269]\n40 Tolerância: 0.03452988966384428\n=====================================================================\n41 Derivada: [-0.01471669001706477, -0.027255484384561157]\n41 Solução: [2.818789406322344, 0.9380822751971946]\n41 Tolerância: 0.03097486713604162\n=====================================================================\n42 Derivada: [-0.02828299063940598, 0.015266522687642947]\n42 Solução: [2.8203449256056112, 0.940963115351184]\n42 Tolerância: 0.03214022828918679\n=====================================================================\n43 Derivada: [-0.013722286616984825, -0.02544858960698093]\n43 Solução: [2.822935605427516, 0.9395647246087842]\n43 Tolerância: 0.028912486281591536\n=====================================================================\n44 Derivada: [-0.026408373296051835, 0.014231932884499088]\n44 Solução: [2.8243847623134433, 0.9422522503758421]\n44 Tolerância: 0.029999168218010814\n=====================================================================\n45 Derivada: [-0.01283868987807768, -0.02382302264619085]\n45 Solução: [2.826808565496057, 0.9409460205738972]\n45 Tolerância: 0.027062305256322427\n=====================================================================\n46 Derivada: [-0.02473792233117056, 0.013327323413076186]\n46 Solução: [2.8281644091948817, 0.9434618765879091]\n46 Tolerância: 0.028099508013838172\n=====================================================================\n47 Derivada: [-0.012051180152266028, -0.02234880633237779]\n47 Solução: [2.8304371605125693, 0.9422374531523908]\n47 Tolerância: 0.025390944991167626\n=====================================================================\n48 Derivada: [-0.023240728020645918, 0.012528392716546932]\n48 Solução: [2.831710941715492, 0.9445996690560832]\n48 Tolerância: 0.026402501074513047\n=====================================================================\n49 Derivada: [-0.011335858975809465, -0.02103463247464532]\n49 Solução: [2.833848268799544, 0.9434474989068121]\n49 Tolerância: 
0.023894716195486474\n=====================================================================\n50 Derivada: [-0.02188530556706958, 0.011795921937906684]\n50 Solução: [2.8350464422481134, 0.9456708097459214]\n50 Tolerância: 0.02486182563950594\n=====================================================================\n51 Derivada: [-0.010690790747808876, -0.019838151515493507]\n51 Solução: [2.837061121943031, 0.9445849211190387]\n51 Tolerância: 0.022535422391539797\n=====================================================================\n52 Derivada: [-0.020654370185333448, 0.011125122330952308]\n52 Solução: [2.838191113207528, 0.9466817667542289]\n52 Tolerância: 0.023459994770490572\n=====================================================================\n53 Derivada: [-0.010105955424758228, -0.01874721496327325]\n53 Solução: [2.8400943685412257, 0.945656610904671]\n53 Tolerância: 0.021297614982114237\n=====================================================================\n54 Derivada: [-0.019541417234255398, 0.010536726845312927]\n54 Solução: [2.841163469324915, 0.9476398634886001]\n54 Tolerância: 0.02220111709207426\n=====================================================================\n55 Derivada: [-0.009575398060261087, -0.017744324692852587]\n55 Solução: [2.8429641684624523, 0.9466689270045479]\n55 Tolerância: 0.020163067892010625\n=====================================================================\n56 Derivada: [-0.018522301934478236, 0.009990653333662891]\n56 Solução: [2.8439780186690586, 0.9485477091860008]\n56 Tolerância: 0.02104492392443854\n=====================================================================\n57 Derivada: [-0.009082512588537206, -0.016847830559150623]\n57 Solução: [2.8456865043134973, 0.9476261775178796]\n57 Tolerância: 0.01914004779175816\n=====================================================================\n58 Derivada: [-0.017581905504294504, 0.009468908456664593]\n58 Solução: [2.8466473359140885, 0.9494084957745109]\n58 Tolerância: 0.01996956755973037\n=====================================================================\n59 Derivada: [-0.008627408414151994, -0.016033392923240797]\n59 Solução: [2.848272299318012, 0.9485333557213795]\n59 Tolerância: 0.018207192660473735\n=====================================================================\n60 Derivada: [-0.01672135961825294, 0.00899967716828698]\n60 Solução: [2.849184195956269, 0.9502280473836612]\n60 Tolerância: 0.018989419596615535\n=====================================================================\n61 Derivada: [-0.008213940985190682, -0.01526882240868943]\n61 Solução: [2.850731156456841, 0.9493954509073531]\n61 Tolerância: 0.017337986164958484\n=====================================================================\n62 Derivada: [-0.01593003777009283, 0.008571823581657867]\n62 Solução: [2.8515993505670156, 0.9510093292768751]\n62 Tolerância: 0.01808983866350527\n=====================================================================\n63 Derivada: [-0.007830828193583272, -0.0145678179446449]\n63 Solução: [2.853074561126505, 0.9502155304896882]\n63 Tolerância: 0.016539141146557055\n=====================================================================\n64 Derivada: [-0.015194540562966985, 0.008162386101091101]\n64 Solução: [2.8539015441881475, 0.9517539806238875]\n64 Tolerância: 0.01724814801023386\n=====================================================================\n65 Derivada: [-0.00748048047287142, -0.013907894716695068]\n65 Solução: [2.85531003469596, 0.9509973507477258]\n65 Tolerância: 
0.015791995553307542\n=====================================================================\n66 Derivada: [-0.014521781365006703, 0.007808658494244725]\n66 Solução: [2.8561007037669377, 0.952467382283104]\n66 Tolerância: 0.016488095144460934\n=====================================================================\n67 Derivada: [-0.00715511202518293, -0.013297233490312976]\n67 Solução: [2.8574468314187995, 0.9517435419456934]\n67 Tolerância: 0.015100067767689601\n=====================================================================\n68 Derivada: [-0.013897860965796482, 0.007481828493425269]\n68 Solução: [2.858203764885453, 0.9531502454336747]\n68 Tolerância: 0.015783798561488288\n=====================================================================\n69 Derivada: [-0.006851996217691614, -0.012731717923188057]\n69 Solução: [2.8594920568985023, 0.952456701303768]\n69 Tolerância: 0.014458440214728842\n=====================================================================\n70 Derivada: [-0.0133126866747304, 0.007162884552439408]\n70 Solução: [2.8602169239910507, 0.9538035793610413]\n70 Tolerância: 0.015117358949602257\n=====================================================================\n71 Derivada: [-0.006567261535421309, -0.012212141438695312]\n71 Solução: [2.8614521908317037, 0.9531389446417514]\n71 Tolerância: 0.01386597715970005\n=====================================================================\n72 Derivada: [-0.012770015343146035, 0.006873811828974397]\n72 Solução: [2.862146936074396, 0.9544308571264083]\n72 Tolerância: 0.014502502574532904\n=====================================================================\n73 Derivada: [-0.006305423407375166, -0.011716283309361586]\n73 Solução: [2.8633318491496778, 0.9537930450882615]\n73 Tolerância: 0.0133052492998628\n=====================================================================\n74 Derivada: [-0.012259286688264481, 0.006591652121677782]\n74 Solução: [2.8639988947503143, 0.9550325011457536]\n74 Tolerância: 0.013919051253525792\n=====================================================================\n75 Derivada: [-0.006057970990740458, -0.011260438710728238]\n75 Solução: [2.86513754035077, 0.9544202668552163]\n75 Tolerância: 0.012786574697029691\n=====================================================================\n76 Derivada: [-0.011784422419473195, 0.006336541745721291]\n76 Solução: [2.865778408162031, 0.9556114994843282]\n76 Tolerância: 0.013379998993118463\n=====================================================================\n77 Derivada: [-0.005823537734525708, -0.010841326818599839]\n77 Solução: [2.8668740271470448, 0.9550223797813149]\n77 Tolerância: 0.012306419419682434\n=====================================================================\n78 Derivada: [-0.011337583370563564, 0.006091714569055995]\n78 Solução: [2.8674895612944913, 0.9561682823520004]\n78 Tolerância: 0.012870500498245145\n=====================================================================\n79 Derivada: [-0.005605308396126496, -0.010443443736033231]\n79 Solução: [2.8685446748347907, 0.9556013669595951]\n79 Tolerância: 0.011852636807199397\n=====================================================================\n80 Derivada: [-0.010916546629829327, 0.005856869027791589]\n80 Solução: [2.8691366294766607, 0.9567042581048754]\n80 Tolerância: 0.01238845854535351\n=====================================================================\n81 Derivada: [-0.0054017482851262955, -0.010065439940024845]\n81 Solução: [2.870153559365197, 0.9561586620139533]\n81 Tolerância: 
0.011423308002593304\n=====================================================================\n82 Derivada: [-0.010523753941905412, 0.0056453049568290226]\n82 Solução: [2.8707240168218826, 0.9572216336604515]\n82 Tolerância: 0.011942314059066107\n=====================================================================\n83 Derivada: [-0.005207412504958597, -0.00971779649389859]\n83 Solução: [2.8717053196224653, 0.9566952289578273]\n83 Tolerância: 0.011025094724927622\n=====================================================================\n84 Derivada: [-0.0101526477253433, 0.005442094020697397]\n84 Solução: [2.872254777286525, 0.9577205976522137]\n84 Tolerância: 0.01151922927825594\n=====================================================================\n85 Derivada: [-0.005025469241709324, -0.009386850087249599]\n85 Solução: [2.873202405248804, 0.9572126434114208]\n85 Tolerância: 0.010647454891187109\n=====================================================================\n86 Derivada: [-0.009801618682681479, 0.005246964130957821]\n86 Solução: [2.8737322051601653, 0.958202233060664]\n86 Tolerância: 0.011117659888315012\n=====================================================================\n87 Derivada: [-0.004854829040217723, -0.00907159012767167]\n87 Solução: [2.8746479661341957, 0.9577120114820834]\n87 Tolerância: 0.01028898014645822\n=====================================================================\n88 Derivada: [-0.009473219792368148, 0.005071918912005202]\n88 Solução: [2.8751597766480943, 0.9586683654889206]\n88 Tolerância: 0.01074552254124813\n=====================================================================\n89 Derivada: [-0.004695501564954796, -0.008768163175698618]\n89 Solução: [2.8760448554565152, 0.9581944983090774]\n89 Tolerância: 0.009946276711522267\n=====================================================================\n90 Derivada: [-0.009164641288411879, 0.004912209604899687]\n90 Solução: [2.876540299080198, 0.9591196668936715]\n90 Tolerância: 0.010398098535200196\n=====================================================================\n91 Derivada: [-0.004542940083684499, -0.00848630206880685]\n91 Solução: [2.877396547533093, 0.9586607212849861]\n91 Tolerância: 0.009625779314267637\n=====================================================================\n92 Derivada: [-0.00886693470568467, 0.00474383800156275]\n92 Solução: [2.877875477785751, 0.9595553724842261]\n92 Tolerância: 0.010056168756536763\n=====================================================================\n93 Derivada: [-0.004399142262087707, -0.008217318505931104]\n93 Solução: [2.8787047234341143, 0.9591117234499308]\n93 Tolerância: 0.009320771216482247\n=====================================================================\n94 Derivada: [-0.008587799566416088, 0.0045933522811481]\n94 Solução: [2.8791684940928173, 0.959978017602114]\n94 Tolerância: 0.009739054706267174\n=====================================================================\n95 Derivada: [-0.0042600546210982415, -0.00797013640364952]\n95 Solução: [2.87997242094737, 0.9595480216266983]\n95 Tolerância: 0.009037208621444996\n=====================================================================\n96 Derivada: [-0.00832236975987577, 0.004448662216994137]\n96 Solução: [2.880421138553715, 0.960387527418849]\n96 Tolerância: 0.009436759716184675\n=====================================================================\n97 Derivada: [-0.004132691769081376, -0.007721934088603888]\n97 Solução: [2.8812002178151412, 0.9599710762667912]\n97 Tolerância: 
0.008758276504368636\n=====================================================================\n98 Derivada: [-0.008072229091000693, 0.004317228547677132]\n98 Solução: [2.8816358984705457, 0.9607851455206085]\n98 Tolerância: 0.009154198207952255\n=====================================================================\n99 Derivada: [-0.004009786211505606, -0.007491400318286878]\n99 Solução: [2.8823915613958584, 0.9603809982253814]\n99 Tolerância: 0.0084970267853414\n=====================================================================\n100 Derivada: [-0.007836341502104993, 0.004198183539465106]\n100 Solução: [2.882814652088799, 0.9611714497817924]\n100 Tolerância: 0.008890050234315265\n=====================================================================\n101 Derivada: [-0.0038948877083209155, -0.007266135717724609]\n101 Solução: [2.8835475155083916, 0.9607788309629236]\n101 Tolerância: 0.008244202722448222\n=====================================================================\n102 Derivada: [-0.0076096283968043466, 0.004078187226411956]\n102 Solução: [2.8839588393278084, 0.9615461790662924]\n102 Tolerância: 0.008633542458986388\n=====================================================================\n103 Derivada: [-0.0037836356268590166, -0.007056917843417665]\n103 Solução: [2.8846705002842907, 0.9611647824367959]\n103 Tolerância: 0.008007245968844934\n=====================================================================\n104 Derivada: [-0.007392450589218669, 0.003960194248506355]\n104 Solução: [2.8850700752075924, 0.9619100358607812]\n104 Tolerância: 0.00838638564579177\n=====================================================================\n105 Derivada: [-0.003675018601859925, -0.006865006512168037]\n105 Solução: [2.885762102251734, 0.9615393114999019]\n105 Tolerância: 0.007786788563722919\n=====================================================================\n106 Derivada: [-0.007185110437029962, 0.003846828796337576]\n106 Solução: [2.8861498700988997, 0.962263669410541]\n106 Tolerância: 0.00815008612105718\n=====================================================================\n107 Derivada: [-0.0035752006389984103, -0.006670890001977625]\n107 Solução: [2.8868224874776103, 0.9619035574924268]\n107 Tolerância: 0.0075685423317558105\n=====================================================================\n108 Derivada: [-0.00698915229249053, 0.003744490139183654]\n108 Solução: [2.8872000503990964, 0.9626080440296535]\n108 Tolerância: 0.007929026180437877\n=====================================================================\n109 Derivada: [-0.0034780955238709055, -0.006490720156783425]\n109 Solução: [2.8878543236125123, 0.9622575123065716]\n109 Tolerância: 0.007363871035457199\n=====================================================================\n110 Derivada: [-0.006800983796674487, 0.003643702770574464]\n110 Solução: [2.888221631643529, 0.962942971812875]\n110 Tolerância: 0.0077155654674768296\n=====================================================================\n111 Derivada: [-0.0033861734661870813, -0.006315988246733184]\n111 Solução: [2.888858289901473, 0.9626018750645615]\n111 Tolerância: 0.007166441116480422\n=====================================================================\n112 Derivada: [-0.006622980110570609, 0.0035530786620014965]\n112 Solução: [2.8892162004178346, 0.9632694600649448]\n112 Tolerância: 0.0075158654540501335\n=====================================================================\n113 Derivada: [-0.0032997633936195925, -0.00614443438022505]\n113 Solução: 
[2.8898355889233533, 0.9629371721755496]\n113 Tolerância: 0.006974418420683081\n=====================================================================\n114 Derivada: [-0.00645101603256748, 0.0034613545967943082]\n114 Solução: [2.890184668202039, 0.9635871868782795]\n114 Tolerância: 0.00732096875401007\n=====================================================================\n115 Derivada: [-0.0032153176347051726, -0.005985258339745059]\n115 Solução: [2.890787974434699, 0.963263477126025]\n115 Tolerância: 0.0067942317362255054\n=====================================================================\n116 Derivada: [-0.006288204108224171, 0.0033791246367478323]\n116 Solução: [2.8911284146365306, 0.9638972006919962]\n116 Tolerância: 0.007138626914005488\n=====================================================================\n117 Derivada: [-0.003135820603659134, -0.0058286830604323825]\n117 Solução: [2.891715918813695, 0.9635814905456521]\n117 Tolerância: 0.0066186794058410575\n=====================================================================\n118 Derivada: [-0.006130659327252985, 0.0032956764493050628]\n118 Solução: [2.8920482289042164, 0.9641991694374781]\n118 Tolerância: 0.006960349635279685\n=====================================================================\n119 Derivada: [-0.0030579322357819905, -0.00568347770334654]\n119 Solução: [2.892621013740959, 0.9638912558190227]\n119 Tolerância: 0.0064539033431770506\n=====================================================================\n120 Derivada: [-0.005981388376954833, 0.0032211127730619182]\n120 Solução: [2.892945349784827, 0.9644940673045568]\n120 Tolerância: 0.006793568606612655\n=====================================================================\n121 Derivada: [-0.0029845118533984305, -0.005540373420814149]\n121 Solução: [2.8935036406890906, 0.9641934150396517]\n121 Tolerância: 0.00629309531511637\n=====================================================================\n122 Derivada: [-0.005836720371977577, 0.0031452239334690546]\n122 Solução: [2.893820462725286, 0.9647815555713992]\n122 Tolerância: 0.006630214045739747\n=====================================================================\n123 Derivada: [-0.0029124045393125186, -0.005407744473373555]\n123 Solução: [2.8943652506054645, 0.9644879866199675]\n123 Tolerância: 0.00614213321981137\n=====================================================================\n124 Derivada: [-0.005697159310721567, 0.00307035335290351]\n124 Solução: [2.894674418061214, 0.9650620478733436]\n124 Tolerância: 0.006471838527298658\n=====================================================================\n125 Derivada: [-0.002843719948004697, -0.005278668059709446]\n125 Solução: [2.895206179589285, 0.9647754671931112]\n125 Tolerância: 0.005995921916375873\n=====================================================================\n126 Derivada: [-0.005562455046819181, 0.0029965383669185996]\n126 Solução: [2.895508055805079, 0.9653358262887441]\n126 Tolerância: 0.006318239338003853\n=====================================================================\n127 Derivada: [-0.0027782443393853384, -0.005153029482144689]\n127 Solução: [2.8960272443048565, 0.9650561353526107]\n127 Tolerância: 0.005854259513651494\n=====================================================================\n128 Derivada: [-0.005434662992648498, 0.002930776442905625]\n128 Solução: [2.8963224242873875, 0.9656036290092906]\n128 Tolerância: 0.006174545521895006\n=====================================================================\n129 Derivada: 
[-0.0027137134446730826, -0.005036738696539089]\n129 Solução: [2.89682968493079, 0.9653300761604556]\n129 Tolerância: 0.0057212741025940836\n=====================================================================\n130 Derivada: [-0.00530893169468083, 0.002858995150589294]\n130 Solução: [2.897117760254804, 0.9658647531488562]\n130 Tolerância: 0.0060298100309943235\n=====================================================================\n131 Derivada: [-0.0026515226557286553, -0.004925136076924019]\n131 Solução: [2.897613771435934, 0.9655976384743711]\n131 Tolerância: 0.005593526416319213\n=====================================================================\n132 Derivada: [-0.005190243615510681, 0.0027969591329366494]\n132 Solução: [2.8978952448721507, 0.9661204682425467]\n132 Tolerância: 0.005895897656817588\n=====================================================================\n133 Derivada: [-0.002594654485918113, -0.004808849940967974]\n133 Solução: [2.898379691896627, 0.9658594056354884]\n133 Tolerância: 0.005464180602436442\n=====================================================================\n134 Derivada: [-0.005076928221132704, 0.002740266142112091]\n134 Solução: [2.8986556035652113, 0.9663707715296322]\n134 Tolerância: 0.005769251137898191\n=====================================================================\n135 Derivada: [-0.002538922071741112, -0.004699529470315156]\n135 Solução: [2.899129009149308, 0.9661152514125295]\n135 Tolerância: 0.0053415075146193355\n=====================================================================\n136 Derivada: [-0.004966608765398561, 0.002682245985855758]\n136 Solução: [2.899399226761771, 0.9666154225864713]\n136 Tolerância: 0.005644612135228881\n=====================================================================\n137 Derivada: [-0.00248372626226967, -0.004598445099109227]\n137 Solução: [2.8998623454471555, 0.9663653126435457]\n137 Tolerância: 0.005226336525273678\n=====================================================================\n138 Derivada: [-0.004859791905141186, 0.002624925843839776]\n138 Solução: [2.900126688566652, 0.9668547254024308]\n138 Tolerância: 0.005523387823332141\n=====================================================================\n139 Derivada: [-0.00243088929529911, -0.004499795270728413]\n139 Solução: [2.900579846957953, 0.9666099603598327]\n139 Tolerância: 0.005114428633236522\n=====================================================================\n140 Derivada: [-0.0047563230784302135, 0.0025683288020168504]\n140 Solução: [2.9008385666362173, 0.9670888738121846]\n140 Tolerância: 0.005405452993198365\n=====================================================================\n141 Derivada: [-0.0023802784137609834, -0.004403509173414477]\n141 Solução: [2.9012820769257113, 0.9668493862433808]\n141 Tolerância: 0.005005658634721504\n=====================================================================\n142 Derivada: [-0.00465801461323867, 0.0025184240247391187]\n142 Solução: [2.9015356280061324, 0.9673184551145295]\n142 Tolerância: 0.005295239343554565\n=====================================================================\n143 Derivada: [-0.0023300129777377165, -0.004314650463822289]\n143 Solução: [2.901969971379263, 0.967083620989542]\n143 Tolerância: 0.0049035873706285765\n=====================================================================\n144 Derivada: [-0.004560772220135512, 0.0024632017489238933]\n144 Solução: [2.902217954785631, 0.9675428294701506]\n144 Tolerância: 
0.005183435742812052\n=====================================================================\n145 Derivada: [-0.0022835289161076133, -0.004222780614988153]\n145 Solução: [2.9026432306498466, 0.9673131446268939]\n145 Tolerância: 0.004800664582432243\n=====================================================================\n146 Derivada: [-0.004468345168785248, 0.0024144611199048427]\n146 Solução: [2.9028864758183546, 0.9677629620016686]\n146 Tolerância: 0.0050789497976391354\n=====================================================================\n147 Derivada: [-0.002237238609012593, -0.004138002231929505]\n147 Solução: [2.903303133187227, 0.9675378220495241]\n147 Tolerância: 0.004704072604149533\n=====================================================================\n148 Derivada: [-0.004378656154933047, 0.002366271878216253]\n148 Solução: [2.9035414474378607, 0.9679786086947435]\n148 Tolerância: 0.004977134951432413\n=====================================================================\n149 Derivada: [-0.00219279515008175, -0.004055129803859359]\n149 Solução: [2.9039497416236943, 0.9677579622187947]\n149 Tolerância: 0.004610035606844104\n=====================================================================\n150 Derivada: [-0.004291592366277186, 0.00231865076310811]\n150 Solução: [2.9041833216856867, 0.9681899211598461]\n150 Tolerância: 0.0048778997939226305\n=====================================================================\n151 Derivada: [-0.0021479990029336093, -0.00398029003202538]\n151 Solução: [2.9045838903884533, 0.9679735029054831]\n151 Tolerância: 0.0045228982362689055\n=====================================================================\n152 Derivada: [-0.004205822635483969, 0.002267865361783805]\n152 Solução: [2.9048123053825243, 0.9683967609809406]\n152 Tolerância: 0.004778300674960515\n=====================================================================\n153 Derivada: [-0.0021060637387115833, -0.003903765878540355]\n153 Solução: [2.905205253563632, 0.9681848753057362]\n153 Tolerância: 0.0044356389061748205\n=====================================================================\n154 Derivada: [-0.004124783391546316, 0.0022246711662212704]\n154 Solução: [2.9054292092245677, 0.9685999959174236]\n154 Tolerância: 0.004686469868140906\n=====================================================================\n155 Derivada: [-0.0020657378816260064, -0.003828890237478788]\n155 Solução: [2.9058145859443316, 0.9683921458571396]\n155 Tolerância: 0.00435059460835471\n=====================================================================\n156 Derivada: [-0.004046024369549706, 0.002181957012698632]\n156 Solução: [2.906034253414074, 0.968799304305397]\n156 Tolerância: 0.004596873894752262\n=====================================================================\n157 Derivada: [-0.0020269400835450924, -0.0037556200973760667]\n157 Solução: [2.906412271712639, 0.9685954450099142]\n157 Tolerância: 0.00426768888487631\n=====================================================================\n158 Derivada: [-0.003971128919742384, 0.0021448111271276105]\n158 Solução: [2.9066279990546, 0.9689951558585959]\n158 Tolerância: 0.004513322464467249\n=====================================================================\n159 Derivada: [-0.001990035488247166, -0.003682595909861419]\n159 Solução: [2.9069986563361256, 0.968794963450383]\n159 Tolerância: 0.004185899411095683\n=====================================================================\n160 Derivada: [-0.003897776438381406, 0.0021064820073242174]\n160 Solução: 
[2.907210638113217, 0.9691872394825903]\n160 Tolerância: 0.004430567414088393\n=====================================================================\n161 Derivada: [-0.0019530248077579415, -0.0036154494309670326]\n161 Solução: [2.907574448819906, 0.9689906246382483]\n161 Tolerância: 0.004109231130953549\n=====================================================================\n162 Derivada: [-0.0038264089137571844, 0.002068537194688247]\n162 Solução: [2.9077824881598557, 0.9693757481196567]\n162 Tolerância: 0.0043497415211813635\n=====================================================================\n163 Derivada: [-0.0019173654926136763, -0.003549675944825026]\n163 Solução: [2.9081396375636253, 0.9691826749687181]\n163 Tolerância: 0.004034413184781044\n=====================================================================\n164 Derivada: [-0.0037569514292377093, 0.0020309882527342893]\n164 Solução: [2.908343878416041, 0.9695607921527212]\n164 Tolerância: 0.004270784158020158\n=====================================================================\n165 Derivada: [-0.0018829925751973065, -0.003485241360170477]\n165 Solução: [2.908694544796239, 0.9693712237454034]\n165 Tolerância: 0.0039613846539929875\n=====================================================================\n166 Derivada: [-0.003689332877499396, 0.00199384542925074]\n166 Solução: [2.9088951241902734, 0.9697424772539384]\n166 Tolerância: 0.004193637642517775\n=====================================================================\n167 Derivada: [-0.0018498447708203969, -0.0034221128242748478]\n167 Solução: [2.909239479189231, 0.9695563756839505]\n167 Tolerância: 0.003890087641454617\n=====================================================================\n168 Derivada: [-0.0036234857163712775, 0.001957117754368909]\n168 Solução: [2.909436527625893, 0.9699209046394294]\n168 Tolerância: 0.0041182470349910615\n=====================================================================\n169 Derivada: [-0.0018160932718869205, -0.003365474323693718]\n169 Solução: [2.9097750683217143, 0.969738051978144]\n169 Tolerância: 0.003824213931729582\n=====================================================================\n170 Derivada: [-0.0035598040483391102, 0.0019221812840832797]\n170 Solução: [2.9099683552261855, 0.9700962395911775]\n170 Tolerância: 0.004045613149011107\n=====================================================================\n171 Derivada: [-0.0017848595690947988, -0.003305958504309814]\n171 Solução: [2.9103009461714366, 0.9699166510291285]\n171 Tolerância: 0.0037570048327900314\n=====================================================================\n172 Derivada: [-0.0034977519347032526, 0.0018876254380977286]\n172 Solução: [2.910490908871242, 0.9702685043703083]\n172 Tolerância: 0.0039745941165449826\n=====================================================================\n173 Derivada: [-0.0017547007227216582, -0.0032476156601042305]\n173 Solução: [2.9108177023144823, 0.9700921443459328]\n173 Tolerância: 0.0036913387953524594\n=====================================================================\n174 Derivada: [-0.003438719066961582, 0.0018578453711697307]\n174 Solução: [2.9110046158542975, 0.9704380855831641]\n174 Tolerância: 0.003908500766874931\n=====================================================================\n175 Derivada: [-0.001724272045415809, -0.003194221139150244]\n175 Solução: [2.9113258938823585, 0.9702645078975002]\n175 Tolerância: 0.0036298984520777853\n=====================================================================\n176 
Derivada: [-0.0033797299928224334, 0.0018239929556997936]\n176 Solução: [2.9115094082487643, 0.9706044690249046]\n176 Tolerância: 0.003840511050215322\n=====================================================================\n177 Derivada: [-0.0016944918017305355, -0.0031429373057569876]\n177 Solução: [2.9118254843767857, 0.9704338871641642]\n177 Tolerância: 0.0035706242275057435\n=====================================================================\n178 Derivada: [-0.0033226269974395706, 0.0017917987372015887]\n178 Solução: [2.9120056740948987, 0.9707681024059217]\n178 Tolerância: 0.003774969255338579\n=====================================================================\n179 Derivada: [-0.0016669587900635108, -0.0030888944007649854]\n179 Solução: [2.9123164098869414, 0.9706005313844936]\n179 Tolerância: 0.0035099886362846364\n=====================================================================\n180 Derivada: [-0.0032683016110928165, 0.0017641400635284299]\n180 Solução: [2.9124938244005016, 0.9709292825814745]\n180 Tolerância: 0.003714025522908801\n=====================================================================\n181 Derivada: [-0.0016390970122417414, -0.0030395095875732636]\n181 Solução: [2.9127994796216683, 0.9707642982301353]\n181 Tolerância: 0.0034532966204034073\n=====================================================================\n182 Derivada: [-0.0032139276008802398, 0.001732599775440491]\n182 Solução: [2.912973778740905, 0.9710875151233818]\n182 Tolerância: 0.003651196051372242\n=====================================================================\n183 Derivada: [-0.0016117901918928368, -0.0029920655716999534]\n183 Solução: [2.9132746430876977, 0.9709253218308047]\n183 Tolerância: 0.003398576762121773\n=====================================================================\n184 Derivada: [-0.0031612560823761626, 0.0017026087405334067]\n184 Solução: [2.913445890876793, 0.9712432196667383]\n184 Tolerância: 0.0035906011393220826\n=====================================================================\n185 Derivada: [-0.0015850209789869396, -0.0029464788841693235]\n185 Solução: [2.9137421139236968, 0.9710836780365562]\n185 Tolerância: 0.0033457479460778897\n=====================================================================\n186 Derivada: [-0.00311021801906719, 0.0016740933058336793]\n186 Solução: [2.913910372452609, 0.9713964626678606]\n186 Tolerância: 0.003532144465160984\n=====================================================================\n187 Derivada: [-0.0015602869812685327, -0.0028982031262145824]\n187 Solução: [2.9142018130174145, 0.9712395930543486]\n187 Tolerância: 0.003291515885532994\n=====================================================================\n188 Derivada: [-0.0030616712664315315, 0.0016497967687705284]\n188 Solução: [2.9143675887475564, 0.9715475182918949]\n188 Tolerância: 0.0034778815853818445\n=====================================================================\n189 Derivada: [-0.0015366615121639171, -0.0028498244545680507]\n189 Solução: [2.91465419997222, 0.9713930764099307]\n189 Tolerância: 0.003237719571676952\n=====================================================================\n190 Derivada: [-0.003013869809874059, 0.0016245669449688194]\n190 Solução: [2.914817606253932, 0.9716961224704758]\n190 Tolerância: 0.003423832500218961\n=====================================================================\n191 Derivada: [-0.0015126316603497614, -0.002805692384775682]\n191 Solução: [2.9150997426565746, 0.9715440424197039]\n191 Tolerância: 
0.0031874698897214277\n=====================================================================\n192 Derivada: [-0.0029671617007460682, 0.0015995579186487419]\n192 Solução: [2.9152605936398657, 0.9718423955421024]\n192 Tolerância: 0.0033708506483506795\n=====================================================================\n193 Derivada: [-0.001489344175029217, -0.002762325674506627]\n193 Solução: [2.91553835757166, 0.9716926566530809]\n193 Tolerância: 0.00313824619871226\n=====================================================================\n194 Derivada: [-0.0029215120597330113, 0.0015747748492813685]\n194 Solução: [2.9156967321986706, 0.971986398224517]\n194 Tolerância: 0.0033189077331397115\n=====================================================================\n195 Derivada: [-0.0014667687711487076, -0.0027197082672500983]\n195 Solução: [2.9159702227456443, 0.9718389793448121]\n195 Tolerância: 0.0030900200140072916\n=====================================================================\n196 Derivada: [-0.0028768875089539847, 0.001550222382526556]\n196 Solução: [2.9161261967380474, 0.972128189045045]\n196 Tolerância: 0.00326797661779606\n=====================================================================\n197 Derivada: [-0.0014434761933719287, -0.002681959607137685]\n197 Solução: [2.916395773248553, 0.9719829266602322]\n197 Tolerância: 0.003045739787826537\n=====================================================================\n198 Derivada: [-0.002832408506863615, 0.0015233243665555563]\n198 Solução: [2.9165490060347663, 0.9722676311430639]\n198 Tolerância: 0.003216062044721037\n=====================================================================\n199 Derivada: [-0.0014216532937805226, -0.002642548411952106]\n199 Solução: [2.9168146739896565, 0.9721247497514438]\n199 Tolerância: 0.003000693286096999\n=====================================================================\n200 Derivada: [-0.002790436597250867, 0.0015013284747382727]\n200 Solução: [2.9169655901573326, 0.9724052705232631]\n200 Tolerância: 0.003168678524611947\n=====================================================================\n201 Derivada: [-0.0014004780936609507, -0.0026037873851798565]\n201 Solução: [2.917227321324375, 0.9722644522533929]\n201 Tolerância: 0.0029565262789371526\n=====================================================================\n202 Derivada: [-0.002749377017252641, 0.0014795122136810335]\n202 Solução: [2.9173759896303078, 0.9725408583330849]\n202 Tolerância: 0.003122183590602638\n=====================================================================\n203 Derivada: [-0.0013799252879485024, -0.0025656634385597954]\n203 Solução: [2.917633869582167, 0.9724020863363579]\n203 Tolerância: 0.0029132151791932963\n=====================================================================\n204 Derivada: [-0.002709202067937788, 0.0014578797802435872]\n204 Solução: [2.9177803560968805, 0.9726744453534182]\n204 Tolerância: 0.003076554777435545\n=====================================================================\n205 Derivada: [-0.001359970739475358, -0.002528163872625555]\n205 Solução: [2.9180344678081784, 0.9725377023875802]\n205 Tolerância: 0.0028707373580803587\n=====================================================================\n206 Derivada: [-0.0026698851858570904, 0.0014364349815352284]\n206 Solução: [2.9181788360399064, 0.9728060806233874]\n206 Tolerância: 0.0030317704995327832\n=====================================================================\n207 Derivada: [-0.0013405914162634147, 
-0.002491276358668415]\n207 Solução: [2.918429259993725, 0.9726713490890933]\n207 Tolerância: 0.0028290710914749572\n=====================================================================\n208 Derivada: [-0.00263140088423075, 0.001415181258408893]\n208 Solução: [2.918571571005733, 0.972935811516267]\n208 Tolerância: 0.0029878100019382343\n=====================================================================\n209 Derivada: [-0.0013217653326984546, -0.0024549889217979626]\n209 Solução: [2.918818385294603, 0.9728030734914342]\n209 Tolerância: 0.00278819550980095\n=====================================================================\n210 Derivada: [-0.002594821703830519, 0.0013974413372217498]\n210 Solução: [2.918958818827494, 0.9730639085723437]\n210 Tolerância: 0.002947192217288521\n=====================================================================\n211 Derivada: [-0.0013037478784267975, -0.002418463982497343]\n211 Solução: [2.9192019645807172, 0.972932962416767]\n211 Tolerância: 0.0027474945978362333\n=====================================================================\n212 Derivada: [-0.002558683391408856, 0.001378869469402133]\n212 Solução: [2.9193406031757605, 0.9731901382513315]\n212 Tolerância: 0.002906568717770293\n=====================================================================\n213 Derivada: [-0.001285266414663866, -0.0023853932510604636]\n213 Solução: [2.9195803626164745, 0.9730609323582102]\n213 Tolerância: 0.002709614496356948\n=====================================================================\n214 Derivada: [-0.0025232857617405458, 0.001360431491626457]\n214 Solução: [2.9197170359203652, 0.9733145915007676]\n214 Tolerância: 0.00286666092846914\n=====================================================================\n215 Derivada: [-0.0012672979697336828, -0.00235283088933258]\n215 Solução: [2.9199534784536727, 0.9731871133240391]\n215 Tolerância: 0.002672425366196155\n=====================================================================\n216 Derivada: [-0.0024875560166082167, 0.0013389496908686738]\n216 Solução: [2.92008812499547, 0.9734370944257607]\n216 Tolerância: 0.0028250170283453256\n=====================================================================\n217 Derivada: [-0.0012495570273767242, -0.002321564660029196]\n217 Solução: [2.920321447243305, 0.973311506599989]\n217 Tolerância: 0.0026364853944907474\n=====================================================================\n218 Derivada: [-0.0024538912712239735, 0.0013217305000416957]\n218 Solução: [2.9204542088641183, 0.973558165760264]\n218 Tolerância: 0.0027872125655804725\n=====================================================================\n219 Derivada: [-0.0012334919237044772, -0.0022872405051010958]\n219 Solução: [2.9206841488405675, 0.9734343140299058]\n219 Tolerância: 0.0025986480050247834\n=====================================================================\n220 Derivada: [-0.0024216232312150865, 0.0013068361612873503]\n220 Solução: [2.9208154294522592, 0.9736777451597135]\n220 Tolerância: 0.002751741235365138\n=====================================================================\n221 Derivada: [-0.0012172072016412017, -0.002255387172940715]\n221 Solução: [2.921042124070843, 0.9735554087362287]\n221 Tolerância: 0.0025628821025542154\n=====================================================================\n222 Derivada: [-0.0023886823842094174, 0.001288065863160881]\n222 Solução: [2.921171671498691, 0.9737954497141837]\n222 Tolerância: 
0.002713838094005015\n=====================================================================\n223 Derivada: [-0.0012001969302923854, -0.002227461717261292]\n223 Solução: [2.9213955011270087, 0.9736747525024884]\n223 Tolerância: 0.0025302289171827692\n=====================================================================\n224 Derivada: [-0.002356684853703328, 0.001270344348007768]\n224 Solução: [2.9215231282694525, 0.9739116174424846]\n224 Tolerância: 0.002677263203756769\n=====================================================================\n225 Derivada: [-0.001184528107779137, -0.002197302955149638]\n225 Solução: [2.921743959594016, 0.9737925808116081]\n225 Tolerância: 0.0024962466053713834\n=====================================================================\n226 Derivada: [-0.0023253118975334885, 0.001252773628728221]\n226 Solução: [2.921869920537362, 0.9740262387140501]\n226 Tolerância: 0.002641309748145353\n=====================================================================\n227 Derivada: [-0.001168141812021517, -0.0021709407317587193]\n227 Solução: [2.922088024972087, 0.9739087338389313]\n227 Tolerância: 0.0024652665076624063\n=====================================================================\n228 Derivada: [-0.002293856598691413, 0.001233259227923611]\n228 Solução: [2.9222120295281515, 0.9741391909109351]\n228 Tolerância: 0.0026043629583103194\n=====================================================================\n229 Derivada: [-0.001152809291182777, -0.00214309865842921]\n229 Solução: [2.9224273935975447, 0.9740234034959355]\n229 Tolerância: 0.0024334833308651237\n=====================================================================\n230 Derivada: [-0.0022642414349220985, 0.0012176665266281361]\n230 Solução: [2.922549770523795, 0.9742509049816334]\n230 Tolerância: 0.002570894989237933\n=====================================================================\n231 Derivada: [-0.0011378732004310166, -0.002115657200551624]\n231 Solução: [2.922762354104561, 0.9741365815237119]\n231 Tolerância: 0.0024022407894516025\n=====================================================================\n232 Derivada: [-0.0022351873238655173, 0.001202186205578215]\n232 Solução: [2.92288314548419, 0.9743611699505954]\n232 Tolerância: 0.0025379743981473963\n=====================================================================\n233 Derivada: [-0.0011233200234768148, -0.0020886093363401415]\n233 Solução: [2.9230930012520697, 0.9742482998986709]\n233 Tolerância: 0.002371526266983176\n=====================================================================\n234 Derivada: [-0.0022066794114836696, 0.0011868204783027636]\n234 Solução: [2.923212247733444, 0.9744700170488314]\n234 Tolerância: 0.002505589126889067\n=====================================================================\n235 Derivada: [-0.001109136788827847, -0.002061948225421162]\n235 Solução: [2.9234194269695717, 0.9743585896440006]\n235 Tolerância: 0.002341327593620514\n=====================================================================\n236 Derivada: [-0.0021787033753790652, 0.0011715713765916291]\n236 Solução: [2.923537167824062, 0.9745774765733868]\n236 Tolerância: 0.002473727529122182\n=====================================================================\n237 Derivada: [-0.0010953110442013525, -0.0020356672015253707]\n237 Solução: [2.923741720464881, 0.9744674808659868]\n237 Tolerância: 0.0023116330242743096\n=====================================================================\n238 Derivada: [-0.00215124540071443, 0.0011564407600701543]\n238 
Solução: [2.923857993641982, 0.974683577922887]\n238 Tolerância: 0.002442378350224761\n=====================================================================\n239 Derivada: [-0.0010807880460887986, -0.0020128472355445126]\n239 Solução: [2.924060165279296, 0.9745748969133461]\n239 Tolerância: 0.002284656821977347\n=====================================================================\n240 Derivada: [-0.0021236430241993176, 0.0011394598175975545]\n240 Solução: [2.924174698863587, 0.9747882029443955]\n240 Tolerância: 0.00241002663266402\n=====================================================================\n241 Derivada: [-0.0010672153658441985, -0.001988582959256746]\n241 Solução: [2.9243744708857453, 0.9746810134641787]\n241 Tolerância: 0.002256858618287881\n=====================================================================\n242 Derivada: [-0.0020976827950276444, 0.00112606992529507]\n242 Solução: [2.924487566142029, 0.9748917481543038]\n242 Tolerância: 0.0023808205697215057\n=====================================================================\n243 Derivada: [-0.0010539764557311315, -0.0019646476375427824]\n243 Solução: [2.924684896074148, 0.9747858182670748]\n243 Tolerância: 0.0022295081764680735\n=====================================================================\n244 Derivada: [-0.0020713099940481428, 0.001110107664374027]\n244 Solução: [2.9247964918783826, 0.9749938366074817]\n244 Tolerância: 0.0023500349184524204\n=====================================================================\n245 Derivada: [-0.0010408506474588464, -0.0019416633161988273]\n245 Solução: [2.924991530543746, 0.9748893066636821]\n245 Tolerância: 0.002203049364809542\n=====================================================================\n246 Derivada: [-0.002046517379836743, 0.0010976258632382496]\n246 Solução: [2.925101736577723, 0.9750948914071987]\n246 Tolerância: 0.002322286787118112\n=====================================================================\n247 Derivada: [-0.0010280423800832494, -0.001918982384786716]\n247 Solução: [2.925294440721262, 0.9749915367745992]\n247 Tolerância: 0.0021770081599224527\n=====================================================================\n248 Derivada: [-0.0020213034506841865, 0.0010826194951008006]\n248 Solução: [2.925403196486355, 0.9751945443562905]\n248 Tolerância: 0.0022929746206445706\n=====================================================================\n249 Derivada: [-0.001015339715190855, -0.0018972034979363173]\n249 Solução: [2.925593711492075, 0.9750925036363619]\n249 Tolerância: 0.002151812224573893\n=====================================================================\n250 Derivada: [-0.001996761691136051, 0.0010684209848079718]\n250 Solução: [2.9257010304954796, 0.9752930335532046]\n250 Tolerância: 0.0022646369801728795\n=====================================================================\n251 Derivada: [-0.0010027429482883932, -0.0018762920019632645]\n251 Solução: [2.925889415162476, 0.9751922332762718]\n251 Tolerância: 0.0021274315728063765\n=====================================================================\n252 Derivada: [-0.0019728690286813233, 0.0010549946397304666]\n252 Solução: [2.9259953109135917, 0.9753903811178488]\n252 Tolerância: 0.00223723621778971\n=====================================================================\n253 Derivada: [-0.0009912047848805727, -0.0018533926384129984]\n253 Solução: [2.9261814414251326, 0.975290847550688]\n253 Tolerância: 
0.0021017971352377558\n=====================================================================\n254 Derivada: [-0.0019493923005065028, 0.0010416753884925356]\n254 Solução: [2.9262861186760505, 0.9754865770802664]\n254 Tolerância: 0.002210252916809547\n=====================================================================\n255 Derivada: [-0.000978996314858982, -0.0018335909584905608]\n255 Solução: [2.926470212745689, 0.9753882047508692]\n255 Tolerância: 0.0020785787422096377\n=====================================================================\n256 Derivada: [-0.0019265300000670393, 0.0010290877143006583]\n256 Solução: [2.9265735110781668, 0.9755816752324055]\n256 Tolerância: 0.002184156442401244\n=====================================================================\n257 Derivada: [-0.0009678168080373695, -0.0018118271198552804]\n257 Solução: [2.926755446108843, 0.9754844916407336]\n257 Tolerância: 0.002054114623423611\n=====================================================================\n258 Derivada: [-0.0019040588721592755, 0.001016593934878074]\n258 Solução: [2.9268575648409536, 0.9756756657211444]\n258 Tolerância: 0.002158449308434029\n=====================================================================\n259 Derivada: [-0.0009568954429433063, -0.0017903506405012592]\n259 Solução: [2.9270373777736896, 0.9755796620000909]\n259 Tolerância: 0.0020300256906425926\n=====================================================================\n260 Derivada: [-0.0018827723126140228, 0.0010066199089528993]\n260 Solução: [2.927138431749529, 0.9757687339114512]\n260 Tolerância: 0.0021349742907693994\n=====================================================================\n261 Derivada: [-0.0009464052646235288, -0.0017686157386371804]\n261 Solução: [2.9273160620780336, 0.9756737642627535]\n261 Tolerância: 0.002005912399847578\n=====================================================================\n262 Derivada: [-0.001861637744301703, 0.0009960804036275306]\n262 Solução: [2.9274160948732613, 0.9758607027579552]\n262 Tolerância: 0.002111367154594273\n=====================================================================\n263 Derivada: [-0.000935436109751997, -0.0017492932761768998]\n263 Solução: [2.927591731258958, 0.9757667274598651]\n263 Tolerância: 0.0019837005019673866\n=====================================================================\n264 Derivada: [-0.0018400698515552705, 0.0009832283587449808]\n264 Solução: [2.9276905189987588, 0.9759514634639609]\n264 Tolerância: 0.0020862873876920656\n=====================================================================\n265 Derivada: [-0.0009245362410235103, -0.0017307419703662674]\n265 Solução: [2.9278642890230397, 0.9758586106759929]\n265 Tolerância: 0.001962201577043804\n=====================================================================\n266 Derivada: [-0.0018190595700326995, 0.0009710775549542916]\n266 Solução: [2.9279618410266206, 0.9760412290952599]\n266 Tolerância: 0.002062030391886491\n=====================================================================\n267 Derivada: [-0.000913706834936967, -0.0017129327961669105]\n267 Solução: [2.928133793450271, 0.9759494348836366]\n267 Tolerância: 0.0019413909818464493\n=====================================================================\n268 Derivada: [-0.001798588284818159, 0.0009595974965179721]\n268 Solução: [2.9282301191418387, 0.9761300173526417]\n268 Tolerância: 0.002038564979000838\n=====================================================================\n269 Derivada: [-0.0009038154676614951, 
-0.0016932687660471402]\n269 Solução: [2.928400136451782, 0.9760393083302582]\n269 Tolerância: 0.001919385712579671\n=====================================================================\n270 Derivada: [-0.0017792119164150577, 0.0009504974015079881]\n270 Solução: [2.9284955021127757, 0.97621797278212]\n270 Tolerância: 0.002017186246677976\n=====================================================================\n271 Derivada: [-0.0008943108027654745, -0.001673351579619009]\n271 Solução: [2.9286635249178095, 0.9761282109959577]\n271 Tolerância: 0.00189733953760429\n=====================================================================\n272 Derivada: [-0.0017599582913065959, 0.0009408702631859001]\n272 Solução: [2.9287579695751877, 0.9763049270952396]\n272 Tolerância: 0.001995667817871085\n=====================================================================\n273 Derivada: [-0.0008843342762032691, -0.0016557044476392946]\n273 Solução: [2.9289241741324523, 0.9762160744637263]\n273 Tolerância: 0.0018770733416679545\n=====================================================================\n274 Derivada: [-0.0017402701199253912, 0.0009290505492742795]\n274 Solução: [2.9290174842447563, 0.976390775334323]\n274 Tolerância: 0.001972732879386354\n=====================================================================\n275 Derivada: [-0.0008744087965979475, -0.0016387635850492188]\n275 Solução: [2.9291819888436477, 0.9763029538598241]\n275 Tolerância: 0.0018574543954701119\n=====================================================================\n276 Derivada: [-0.001721082674817076, 0.0009178797058062571]\n276 Solução: [2.9292741716191557, 0.9764757171900412]\n276 Tolerância: 0.0019505457512928228\n=====================================================================\n277 Derivada: [-0.0008645359493888449, -0.001622502664215375]\n277 Solução: [2.929437020033257, 0.9763888676408514]\n277 Tolerância: 0.001838460579716536\n=====================================================================\n278 Derivada: [-0.0017023794675443682, 0.0009073297345842946]\n278 Solução: [2.9295280828332237, 0.9765597681518847]\n278 Tolerância: 0.0019290783029150096\n=====================================================================\n279 Derivada: [-0.000855536503029164, -0.001604466236859281]\n279 Solução: [2.9296891615545104, 0.9764739168383447]\n279 Tolerância: 0.0018183109231472909\n=====================================================================\n280 Derivada: [-0.0016846920054494063, 0.0008990313820333995]\n280 Solução: [2.929779354756711, 0.9766430644401278]\n280 Tolerância: 0.001909566594572189\n=====================================================================\n281 Derivada: [-0.0008468855818692944, -0.0015861827178973442]\n281 Solução: [2.929938605656579, 0.976558080623421]\n281 Tolerância: 0.0017981075616698516\n=====================================================================\n282 Derivada: [-0.0016671032253210427, 0.0008902406235478111]\n282 Solução: [2.9300279643876372, 0.9767254459416317]\n282 Tolerância: 0.0018899104559980133\n=====================================================================\n283 Derivada: [-0.0008377716928320567, -0.0015700401201250713]\n283 Solução: [2.930185552651811, 0.9766412930994857]\n283 Tolerância: 0.001779575058297075\n=====================================================================\n284 Derivada: [-0.0016490815050316243, 0.0008793669487303646]\n284 Solução: [2.9302738730348947, 0.9768068113976721]\n284 Tolerância: 
0.001868891661053877\n=====================================================================\n285 Derivada: [-0.0008286943630384513, -0.0015545458100518772]\n285 Solução: [2.930429908716241, 0.9767236059159662]\n285 Tolerância: 0.0017616319203742738\n=====================================================================\n286 Derivada: [-0.0016315110671110133, 0.0008690942038889204]\n286 Solução: [2.9305171962709906, 0.9768873484349907]\n286 Tolerância: 0.0018485543263153058\n=====================================================================\n287 Derivada: [-0.0008196554517487797, -0.001539675848984956]\n287 Solução: [2.9306717188111757, 0.9768050353898927]\n287 Tolerância: 0.00174425823189373\n=====================================================================\n288 Derivada: [-0.0016143772397283485, 0.000859396661446965]\n288 Solução: [2.9307579792428577, 0.9769670706732553]\n288 Tolerância: 0.0018288730119554804\n=====================================================================\n289 Derivada: [-0.0008114327777661856, -0.0015231043846775094]\n289 Solução: [2.9309108790159137, 0.9768856760950448]\n289 Tolerância: 0.0017257665309818714\n=====================================================================\n290 Derivada: [-0.00159818893284136, 0.0008518328929163488]\n290 Solução: [2.9309963483833594, 0.9770461068440596]\n290 Tolerância: 0.001811029304707917\n=====================================================================\n291 Derivada: [-0.0008035256503156063, -0.0015062924317454929]\n291 Solução: [2.9311475686221984, 0.9769655066278582]\n291 Tolerância: 0.001707211281783502\n=====================================================================\n292 Derivada: [-0.0015820794371590452, 0.0008438080792529945]\n292 Solução: [2.931232278685645, 0.9771243044551734]\n292 Tolerância: 0.0017930385997223007\n=====================================================================\n293 Derivada: [-0.0007951655835443106, -0.0014915040809206914]\n293 Solução: [2.931381974648014, 0.9770444635448424]\n293 Tolerância: 0.0016902286024844215\n=====================================================================\n294 Derivada: [-0.001566210275861124, 0.0008358219141264556]\n294 Solução: [2.9314658033683614, 0.9772017023402385]\n294 Tolerância: 0.001775278259977007\n=====================================================================\n295 Derivada: [-0.0007877291458786573, -0.0014746416112352279]\n295 Solução: [2.931613854403807, 0.9771226936006449]\n295 Tolerância: 0.0016718507974257594\n=====================================================================\n296 Derivada: [-0.0015504132275552251, 0.0008273871834454383]\n296 Solução: [2.9316969712715397, 0.9772782897118157]\n296 Tolerância: 0.0017573704013405897\n=====================================================================\n297 Derivada: [-0.000779841925939273, -0.0014597649847409855]\n297 Solução: [2.931843529040471, 0.977200078292116]\n297 Tolerância: 0.0016550127613189343\n=====================================================================\n298 Derivada: [-0.0015355048863447962, 0.0008209797426239618]\n298 Solução: [2.9319258850885888, 0.9773542383485642]\n298 Tolerância: 0.0017412015948153885\n=====================================================================\n299 Derivada: [-0.0007722590032077647, -0.0014446181537586256]\n299 Solução: [2.932070893019216, 0.9772767077756409]\n299 Tolerância: 0.001638079844880716\n=====================================================================\n300 Derivada: [-0.0015200007795064252, 
0.0008121386362738292]\n300 Solução: [2.9321524482645955, 0.9774292682346583]\n300 Tolerância: 0.00172336053518376\n=====================================================================\n"
],
[
"t",
"_____no_output_____"
],
[
"n",
"_____no_output_____"
]
],
[
[
"**Conclusões**\n\n\nPode-se observar que ambos os métodos, Newton e Descida Gradiente, apresentam um enorme dificuldade para convergir quando se tem um grande número de variáveis, levando bastante tempo para convergir. Em ambos os casos, demorou bantante tempo para convergência quando o número de variáveis foi de 50. Para 10, o método atinge o máximo de iterações e não converge. \n\nO método da Descida Gradiente apresenta muitas mais iterações até a convergência se comparado ao método de Newton, porém tem a vantagem de convergir mesmo com pontos iniciais distante da solução. Além pode cair em um mínimo local mais facilmente e não apresentar uma precisão tão alta como no método de Newton. \n\nConforme pode-se observar, para ambos os métodos, com exceção da questão 3, todos as demais questões convergiram para ótimos locais próximos. O método de Descida Gradiente apresentando mais iterações e precisão menor, conforme dito. A respeito de não ser obtido pontos de convergência próximos na questão 3 pode-se dever ao fato da função apresentar muitos pontos de inflexão. Se a aplicação exigir uma precisão da ordem de $ 10^4 $, torna-se mais apropriado o uso do método de Newton.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7519df3b33114b3059202872c861a5eee190671 | 14,884 | ipynb | Jupyter Notebook | notebooks/pandas-7.ipynb | lincolwn/minicurso-ufc | ac1cd97cf7a40b022531ed53ef2b4d8c089a0f0d | [
"MIT"
] | null | null | null | notebooks/pandas-7.ipynb | lincolwn/minicurso-ufc | ac1cd97cf7a40b022531ed53ef2b4d8c089a0f0d | [
"MIT"
] | null | null | null | notebooks/pandas-7.ipynb | lincolwn/minicurso-ufc | ac1cd97cf7a40b022531ed53ef2b4d8c089a0f0d | [
"MIT"
] | null | null | null | 24.931323 | 83 | 0.310938 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"dados = {\n 'nomes': ['joão', 'maria', 'josé', np.nan, 'pedro', 'judas', 'tiago'],\n 'sexo': ['m', 'f', 'm', np.nan, 'm', 'm', np.nan],\n 'idade': [14, 13, np.nan, np.nan, 15, 13, 14],\n}",
"_____no_output_____"
],
[
"df = pd.DataFrame(dados)\ndf",
"_____no_output_____"
],
[
"df.dropna()",
"_____no_output_____"
],
[
"df.dropna(how='all')",
"_____no_output_____"
],
[
"df.dropna(how='all', axis=1)",
"_____no_output_____"
],
[
"df.dropna(thresh=3)",
"_____no_output_____"
],
[
"df.fillna(8, inplace=True)\ndf",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e751a1b9f56874421fa490a283f82d3fc5df7b62 | 55,748 | ipynb | Jupyter Notebook | deepl/Building+your+Deep+Neural+Network+-+Step+by+Step+v8.ipynb | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | deepl/Building+your+Deep+Neural+Network+-+Step+by+Step+v8.ipynb | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | deepl/Building+your+Deep+Neural+Network+-+Step+by+Step+v8.ipynb | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | 37.314592 | 562 | 0.512503 | [
[
[
"# Building your Deep Neural Network: Step by Step\n\nWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!\n\n- In this notebook, you will implement all the functions required to build a deep neural network.\n- In the next assignment, you will use these functions to build a deep neural network for image classification.\n\n**After this assignment you will be able to:**\n- Use non-linear units like ReLU to improve your model\n- Build a deeper neural network (with more than 1 hidden layer)\n- Implement an easy-to-use neural network class\n\n**Notation**:\n- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. \n - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.\n- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example.\n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).\n\nLet's get started!",
"_____no_output_____"
],
[
"## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the main package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- dnn_utils provides some necessary functions for this notebook.\n- testCases provides some test cases to assess the correctness of your functions\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom testCases_v4 import *\nfrom dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
]
],
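[
[
"*Illustrative aside (not part of the original assignment):* `dnn_utils_v2` is an external helper file, so its contents are not shown in this notebook. The next cell is a minimal sketch of what the `sigmoid` and `relu` forward helpers could look like; the function names and the caching convention used here are assumptions, not the graded implementation.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch only (not the graded dnn_utils_v2 code): assumed shape of the forward helpers.\n# The real helpers may differ; these are the standard textbook definitions.\nimport numpy as np\n\ndef sigmoid_sketch(Z):\n    # Element-wise sigmoid; Z is also returned so it can be cached for backprop.\n    A = 1 / (1 + np.exp(-Z))\n    return A, Z\n\ndef relu_sketch(Z):\n    # Element-wise ReLU: max(0, z).\n    A = np.maximum(0, Z)\n    return A, Z\n\nprint(sigmoid_sketch(np.array([0.0, 2.0]))[0])\nprint(relu_sketch(np.array([-1.0, 3.0]))[0])",
"_____no_output_____"
]
],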
[
[
"## 2 - Outline of the Assignment\n\nTo build your neural network, you will be implementing several \"helper functions\". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:\n\n- Initialize the parameters for a two-layer network and for an $L$-layer neural network.\n- Implement the forward propagation module (shown in purple in the figure below).\n - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).\n - We give you the ACTIVATION function (relu/sigmoid).\n - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.\n - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.\n- Compute the loss.\n- Implement the backward propagation module (denoted in red in the figure below).\n - Complete the LINEAR part of a layer's backward propagation step.\n - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) \n - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.\n - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function\n- Finally update the parameters.\n\n<img src=\"images/final outline.png\" style=\"width:800px;height:500px;\">\n<caption><center> **Figure 1**</center></caption><br>\n\n\n**Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ",
"_____no_output_____"
],
[
"## 3 - Initialization\n\nYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.\n\n### 3.1 - 2-layer Neural Network\n\n**Exercise**: Create and initialize the parameters of the 2-layer neural network.\n\n**Instructions**:\n- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. \n- Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.\n- Use zero initialization for the biases. Use `np.zeros(shape)`.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h,n_x)*0.01\n b1 = np.zeros((n_h,1))\n W2 = np.random.randn(n_y,n_h)*0.01\n b2 = np.zeros((n_y,1))\n ### END CODE HERE ###\n \n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters ",
"_____no_output_____"
],
[
"parameters = initialize_parameters(3,2,1)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = [[ 0.01624345 -0.00611756 -0.00528172]\n [-0.01072969 0.00865408 -0.02301539]]\nb1 = [[ 0.]\n [ 0.]]\nW2 = [[ 0.01744812 -0.00761207]]\nb2 = [[ 0.]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td> [[ 0.01624345 -0.00611756 -0.00528172]\n [-0.01072969 0.00865408 -0.02301539]] </td> \n </tr>\n\n <tr>\n <td> **b1**</td>\n <td>[[ 0.]\n [ 0.]]</td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[ 0.01744812 -0.00761207]]</td>\n </tr>\n \n <tr>\n <td> **b2** </td>\n <td> [[ 0.]] </td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"### 3.2 - L-layer Neural Network\n\nThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:\n\n<table style=\"width:100%\">\n\n\n <tr>\n <td> </td> \n <td> **Shape of W** </td> \n <td> **Shape of b** </td> \n <td> **Activation** </td>\n <td> **Shape of Activation** </td> \n <tr>\n \n <tr>\n <td> **Layer 1** </td> \n <td> $(n^{[1]},12288)$ </td> \n <td> $(n^{[1]},1)$ </td> \n <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> \n \n <td> $(n^{[1]},209)$ </td> \n <tr>\n \n <tr>\n <td> **Layer 2** </td> \n <td> $(n^{[2]}, n^{[1]})$ </td> \n <td> $(n^{[2]},1)$ </td> \n <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> \n <td> $(n^{[2]}, 209)$ </td> \n <tr>\n \n <tr>\n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$</td> \n <td> $\\vdots$ </td> \n <tr>\n \n <tr>\n <td> **Layer L-1** </td> \n <td> $(n^{[L-1]}, n^{[L-2]})$ </td> \n <td> $(n^{[L-1]}, 1)$ </td> \n <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> \n <td> $(n^{[L-1]}, 209)$ </td> \n <tr>\n \n \n <tr>\n <td> **Layer L** </td> \n <td> $(n^{[L]}, n^{[L-1]})$ </td> \n <td> $(n^{[L]}, 1)$ </td>\n <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>\n <td> $(n^{[L]}, 209)$ </td> \n <tr>\n\n</table>\n\nRemember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: \n\n$$ W = \\begin{bmatrix}\n j & k & l\\\\\n m & n & o \\\\\n p & q & r \n\\end{bmatrix}\\;\\;\\; X = \\begin{bmatrix}\n a & b & c\\\\\n d & e & f \\\\\n g & h & i \n\\end{bmatrix} \\;\\;\\; b =\\begin{bmatrix}\n s \\\\\n t \\\\\n u\n\\end{bmatrix}\\tag{2}$$\n\nThen $WX + b$ will be:\n\n$$ WX + b = \\begin{bmatrix}\n (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\\\\n (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\\\\n (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\n\\end{bmatrix}\\tag{3} $$",
"_____no_output_____"
],
[
"**Exercise**: Implement initialization for an L-layer Neural Network. \n\n**Instructions**:\n- The model's structure is *[LINEAR -> RELU] $ \\times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.\n- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.\n- Use zeros initialization for the biases. Use `np.zeros(shape)`.\n- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the \"Planar Data classification model\" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! \n- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).\n```python\n if L == 1:\n parameters[\"W\" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01\n parameters[\"b\" + str(L)] = np.zeros((layer_dims[1], 1))\n```",
"_____no_output_____"
]
],
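[
[
"*Illustrative aside (not part of the original assignment):* the broadcasting rule for $WX + b$ described above can be checked directly in numpy. The matrices in the next cell are arbitrary examples; only the shapes matter.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch (not part of the original assignment): numpy broadcasting of W X + b.\n# W, X and b below are arbitrary example values; only the shapes matter.\nimport numpy as np\n\nW = np.arange(1, 10).reshape(3, 3)   # shape (3, 3)\nX = np.ones((3, 3))                  # shape (3, 3)\nb = np.array([[10.], [20.], [30.]])  # shape (3, 1)\n\nZ = np.dot(W, X) + b                 # b is broadcast across every column of W X\nprint(Z.shape)\nprint(Z)",
"_____no_output_____"
]
],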
[
[
"# GRADED FUNCTION: initialize_parameters_deep\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1])*0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n ### END CODE HERE ###\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters",
"_____no_output_____"
],
[
"parameters = initialize_parameters_deep([5,4,3])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]\nb1 = [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\nW2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]\nb2 = [[ 0.]\n [ 0.]\n [ 0.]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> \n </tr>\n \n <tr>\n <td>**b1** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n \n <tr>\n <td>**W2** </td>\n <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> \n </tr>\n \n <tr>\n <td>**b2** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"## 4 - Forward propagation module\n\n### 4.1 - Linear Forward \nNow that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:\n\n- LINEAR\n- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. \n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID (whole model)\n\nThe linear forward module (vectorized over all the examples) computes the following equations:\n\n$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\\tag{4}$$\n\nwhere $A^{[0]} = X$. \n\n**Exercise**: Build the linear part of forward propagation.\n\n**Reminder**:\nThe mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_forward\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter \n cache -- a python tuple containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot(W,A)+b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache",
"_____no_output_____"
],
[
"A, W, b = linear_forward_test_case()\n\nZ, linear_cache = linear_forward(A, W, b)\nprint(\"Z = \" + str(Z))",
"Z = [[ 3.26295337 -1.23429987]]\n"
]
],
[
[
"**Expected output**:\n\n<table style=\"width:35%\">\n \n <tr>\n <td> **Z** </td>\n <td> [[ 3.26295337 -1.23429987]] </td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"### 4.2 - Linear-Activation Forward\n\nIn this notebook, you will use two activation functions:\n\n- **Sigmoid**: $\\sigma(Z) = \\sigma(W A + b) = \\frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value \"`a`\" and a \"`cache`\" that contains \"`Z`\" (it's what we will feed in to the corresponding backward function). To use it you could just call: \n``` python\nA, activation_cache = sigmoid(Z)\n```\n\n- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value \"`A`\" and a \"`cache`\" that contains \"`Z`\" (it's what we will feed in to the corresponding backward function). To use it you could just call:\n``` python\nA, activation_cache = relu(Z)\n```",
"_____no_output_____"
],
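[
"The `sigmoid` and `relu` helpers are provided with the assignment, so you do not have to write them. Purely for intuition, a minimal sketch of what such helpers could look like (illustrative names and code, not the provided implementation) is:\n\n```python\nimport numpy as np\n\ndef sigmoid_sketch(Z):\n    A = 1 / (1 + np.exp(-Z))\n    return A, Z   # return the activation and cache Z for the backward pass\n\ndef relu_sketch(Z):\n    A = np.maximum(0, Z)\n    return A, Z   # return the activation and cache Z for the backward pass\n```",
"_____no_output_____"
],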
[
"For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.\n\n**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation \"g\" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_activation_forward\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value \n cache -- a python tuple containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n \n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev,W,b)\n A, activation_cache = sigmoid(Z)\n ### END CODE HERE ###\n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev,W,b)\n A, activation_cache = relu(Z)\n ### END CODE HERE ###\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache",
"_____no_output_____"
],
[
"A_prev, W, b = linear_activation_forward_test_case()\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"sigmoid\")\nprint(\"With sigmoid: A = \" + str(A))\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"relu\")\nprint(\"With ReLU: A = \" + str(A))",
"With sigmoid: A = [[ 0.96890023 0.11013289]]\nWith ReLU: A = [[ 3.43896131 0. ]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:35%\">\n <tr>\n <td> **With sigmoid: A ** </td>\n <td > [[ 0.96890023 0.11013289]]</td> \n </tr>\n <tr>\n <td> **With ReLU: A ** </td>\n <td > [[ 3.43896131 0. ]]</td> \n </tr>\n</table>\n",
"_____no_output_____"
],
[
"**Note**: In deep learning, the \"[LINEAR->ACTIVATION]\" computation is counted as a single layer in the neural network, not two layers. ",
"_____no_output_____"
],
[
"### d) L-Layer Model \n\nFor even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.\n\n<img src=\"images/model_architecture_kiank.png\" style=\"width:600px;height:300px;\">\n<caption><center> **Figure 2** : *[LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>\n\n**Exercise**: Implement the forward propagation of the above model.\n\n**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \\sigma(Z^{[L]}) = \\sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\\hat{Y}$.) \n\n**Tips**:\n- Use the functions you had previously written \n- Use a for loop to replicate [LINEAR->RELU] (L-1) times\n- Don't forget to keep track of the caches in the \"caches\" list. To add a new value `c` to a `list`, you can use `list.append(c)`.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: L_model_forward\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n \n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n \n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n ### START CODE HERE ### (≈ 2 lines of code)\n A, cache = linear_activation_forward(A_prev, parameters[\"W\"+str(l)], parameters[\"b\" + str(l)], activation = \"relu\")\n caches.append(cache)\n ### END CODE HERE ###\n \n # Implement LINEAR -> SIGMOID. Add \"cache\" to the \"caches\" list.\n ### START CODE HERE ### (≈ 2 lines of code)\n AL, cache = linear_activation_forward(A, parameters[\"W\"+str(L)], parameters[\"b\" + str(L)], activation = \"sigmoid\")\n caches.append(cache)\n ### END CODE HERE ###\n \n assert(AL.shape == (1,X.shape[1]))\n \n return AL, caches",
"_____no_output_____"
],
[
"X, parameters = L_model_forward_test_case_2hidden()\nAL, caches = L_model_forward(X, parameters)\nprint(\"AL = \" + str(AL))\nprint(\"Length of caches list = \" + str(len(caches)))",
"AL = [[ 0.03921668 0.70498921 0.19734387 0.04728177]]\nLength of caches list = 3\n"
]
],
[
[
"<table style=\"width:50%\">\n <tr>\n <td> **AL** </td>\n <td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td> \n </tr>\n <tr>\n <td> **Length of caches list ** </td>\n <td > 3 </td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in \"caches\". Using $A^{[L]}$, you can compute the cost of your predictions.",
"_____no_output_____"
],
[
"## 5 - Cost function\n\nNow that forward propagation is implemented, you need to compute the cost, because you want to check if your model is actually learning; backward propagation comes next.\n\n**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} (y^{(i)}\\log\\left(a^{[L] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right)) \\tag{7}$$\n",
"_____no_output_____"
]
],
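[
[
"For intuition, here is formula (7) evaluated directly on a tiny made-up batch ($m = 3$, all labels equal to 1), before wrapping the computation in the graded function below:\n\n```python\nimport numpy as np\n\nY = np.array([[1, 1, 1]])\nAL = np.array([[0.8, 0.9, 0.4]])\nm = Y.shape[1]\n\ncost = -(np.dot(Y, np.log(AL).T) + np.dot(1 - Y, np.log(1 - AL).T)) / m\nprint(np.squeeze(cost))   # approximately 0.4149\n```",
"_____no_output_____"
]
],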
[
[
"# GRADED FUNCTION: compute_cost\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n ### START CODE HERE ### (≈ 1 lines of code)\n cost = -1/m*( np.dot(Y,np.log(AL).T)+ np.dot(1-Y,np.log(1-AL).T)) \n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost",
"_____no_output_____"
],
[
"Y, AL = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(AL, Y)))",
"cost = 0.41493159961539694\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n\n <tr>\n <td>**cost** </td>\n <td> 0.41493159961539694</td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"## 6 - Backward propagation module\n\nJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. \n\n**Reminder**: \n<img src=\"images/backprop_kiank.png\" style=\"width:650px;height:250px;\">\n<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>\n\n<!-- \nFor those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:\n\n$$\\frac{d \\mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \\frac{d\\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\\frac{{da^{[2]}}}{{dz^{[2]}}}\\frac{{dz^{[2]}}}{{da^{[1]}}}\\frac{{da^{[1]}}}{{dz^{[1]}}} \\tag{8} $$\n\nIn order to calculate the gradient $dW^{[1]} = \\frac{\\partial L}{\\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.\n\nEquivalently, in order to calculate the gradient $db^{[1]} = \\frac{\\partial L}{\\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial b^{[1]}}$.\n\nThis is why we talk about **backpropagation**.\n!-->\n\nNow, similar to forward propagation, you are going to build the backward propagation in three steps:\n- LINEAR backward\n- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation\n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)",
"_____no_output_____"
],
[
"### 6.1 - Linear backward\n\nFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).\n\nSuppose you have already calculated the derivative $dZ^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.\n\n<img src=\"images/linearback_kiank.png\" style=\"width:250px;height:300px;\">\n<caption><center> **Figure 4** </center></caption>\n\nThe three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:\n$$ dW^{[l]} = \\frac{\\partial \\mathcal{J} }{\\partial W^{[l]}} = \\frac{1}{m} dZ^{[l]} A^{[l-1] T} \\tag{8}$$\n$$ db^{[l]} = \\frac{\\partial \\mathcal{J} }{\\partial b^{[l]}} = \\frac{1}{m} \\sum_{i = 1}^{m} dZ^{[l](i)}\\tag{9}$$\n$$ dA^{[l-1]} = \\frac{\\partial \\mathcal{L} }{\\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \\tag{10}$$\n",
"_____no_output_____"
],
[
"**Exercise**: Use the 3 formulas above to implement linear_backward().",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_backward\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = 1/m*np.dot(dZ,A_prev.T)\n db = 1/m*np.sum(dZ,axis=1,keepdims=True)\n dA_prev = np.dot(W.T,dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"# Set up some test inputs\ndZ, linear_cache = linear_backward_test_case()\n\ndA_prev, dW, db = linear_backward(dZ, linear_cache)\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"dA_prev = [[ 0.51822968 -0.19517421]\n [-0.40506361 0.15255393]\n [ 2.37496825 -0.89445391]]\ndW = [[-0.10076895 1.40685096 1.64992505]]\ndb = [[ 0.50629448]]\n"
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:90%\">\n <tr>\n <td> **dA_prev** </td>\n <td > [[ 0.51822968 -0.19517421]\n [-0.40506361 0.15255393]\n [ 2.37496825 -0.89445391]] </td> \n </tr> \n \n <tr>\n <td> **dW** </td>\n <td > [[-0.10076895 1.40685096 1.64992505]] </td> \n </tr> \n \n <tr>\n <td> **db** </td>\n <td> [[ 0.50629448]] </td> \n </tr> \n \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.2 - Linear-Activation backward\n\nNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. \n\nTo help you implement `linear_activation_backward`, we provided two backward functions:\n- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:\n\n```python\ndZ = sigmoid_backward(dA, activation_cache)\n```\n\n- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:\n\n```python\ndZ = relu_backward(dA, activation_cache)\n```\n\nIf $g(.)$ is the activation function, \n`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \\tag{11}$$. \n\n**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.",
"_____no_output_____"
]
],
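[
[
"As with the forward helpers, `sigmoid_backward` and `relu_backward` are provided for you. A minimal sketch of how equation (11) could be implemented from the cached `Z` (illustrative only, not necessarily the provided code) is:\n\n```python\nimport numpy as np\n\ndef relu_backward_sketch(dA, cache):\n    Z = cache\n    dZ = np.array(dA, copy=True)\n    dZ[Z <= 0] = 0            # g'(Z) is 0 where Z <= 0 and 1 elsewhere\n    return dZ\n\ndef sigmoid_backward_sketch(dA, cache):\n    Z = cache\n    s = 1 / (1 + np.exp(-Z))\n    return dA * s * (1 - s)   # g'(Z) = s * (1 - s) for the sigmoid\n```",
"_____no_output_____"
]
],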
[
[
"# GRADED FUNCTION: linear_activation_backward\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n ### END CODE HERE ###\n \n elif activation == \"sigmoid\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n ### END CODE HERE ###\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"dAL, linear_activation_cache = linear_activation_backward_test_case()\n\ndA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = \"sigmoid\")\nprint (\"sigmoid:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db) + \"\\n\")\n\ndA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = \"relu\")\nprint (\"relu:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"sigmoid:\ndA_prev = [[ 0.11017994 0.01105339]\n [ 0.09466817 0.00949723]\n [-0.05743092 -0.00576154]]\ndW = [[ 0.10266786 0.09778551 -0.01968084]]\ndb = [[-0.05729622]]\n\nrelu:\ndA_prev = [[ 0.44090989 -0. ]\n [ 0.37883606 -0. ]\n [-0.2298228 0. ]]\ndW = [[ 0.44513824 0.37371418 -0.10478989]]\ndb = [[-0.20837892]]\n"
]
],
[
[
"**Expected output with sigmoid:**\n\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td >[[ 0.11017994 0.01105339]\n [ 0.09466817 0.00949723]\n [-0.05743092 -0.00576154]] </td> \n\n </tr> \n \n <tr>\n <td > dW </td> \n <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> \n </tr> \n \n <tr>\n <td > db </td> \n <td > [[-0.05729622]] </td> \n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"**Expected output with relu:**\n\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td > [[ 0.44090989 0. ]\n [ 0.37883606 0. ]\n [-0.2298228 0. ]] </td> \n\n </tr> \n \n <tr>\n <td > dW </td> \n <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> \n </tr> \n \n <tr>\n <td > db </td> \n <td > [[-0.20837892]] </td> \n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.3 - L-Model Backward \n\nNow you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. \n\n\n<img src=\"images/mn_backward.png\" style=\"width:450px;height:300px;\">\n<caption><center> **Figure 5** : Backward pass </center></caption>\n\n** Initializing backpropagation**:\nTo backpropagate through this network, we know that the output is, \n$A^{[L]} = \\sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \\frac{\\partial \\mathcal{L}}{\\partial A^{[L]}}$.\nTo do so, use this formula (derived using calculus which you don't need in-depth knowledge of):\n```python\ndAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL\n```\n\nYou can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : \n\n$$grads[\"dW\" + str(l)] = dW^{[l]}\\tag{15} $$\n\nFor example, for $l=3$ this would store $dW^{[l]}$ in `grads[\"dW3\"]`.\n\n**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: L_model_backward\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n ### START CODE HERE ### (1 line of code)\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) \n ### END CODE HERE ###\n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"dAL, current_cache\". Outputs: \"grads[\"dAL-1\"], grads[\"dWL\"], grads[\"dbL\"]\n ### START CODE HERE ### (approx. 2 lines)\n current_cache = caches[L-1]\n grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, activation = \"sigmoid\")\n ### END CODE HERE ###\n \n # Loop from l=L-2 to l=0\n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 1)], current_cache\". Outputs: \"grads[\"dA\" + str(l)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n ### START CODE HERE ### (approx. 5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 1)], current_cache, activation = \"relu\")\n grads[\"dA\" + str(l)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads",
"_____no_output_____"
],
[
"AL, Y_assess, caches = L_model_backward_test_case()\ngrads = L_model_backward(AL, Y_assess, caches)\nprint_grads(grads)",
"dW1 = [[ 0.41010002 0.07807203 0.13798444 0.10502167]\n [ 0. 0. 0. 0. ]\n [ 0.05283652 0.01005865 0.01777766 0.0135308 ]]\ndb1 = [[-0.22007063]\n [ 0. ]\n [-0.02835349]]\ndA1 = [[ 0.12913162 -0.44014127]\n [-0.14175655 0.48317296]\n [ 0.01663708 -0.05670698]]\n"
]
],
[
[
"**Expected Output**\n\n<table style=\"width:60%\">\n \n <tr>\n <td > dW1 </td> \n <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167]\n [ 0. 0. 0. 0. ]\n [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> \n </tr> \n \n <tr>\n <td > db1 </td> \n <td > [[-0.22007063]\n [ 0. ]\n [-0.02835349]] </td> \n </tr> \n \n <tr>\n <td > dA1 </td> \n <td > [[ 0.12913162 -0.44014127]\n [-0.14175655 0.48317296]\n [ 0.01663708 -0.05670698]] </td> \n\n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.4 - Update Parameters\n\nIn this section you will update the parameters of the model, using gradient descent: \n\n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{16}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{17}$$\n\nwhere $\\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. ",
"_____no_output_____"
],
[
"**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.\n\n**Instructions**:\nUpdate parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. \n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\" + str(l + 1)] \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads[\"db\" + str(l + 1)] \n ### END CODE HERE ###\n return parameters",
"_____no_output_____"
],
[
"parameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads, 0.1)\n\nprint (\"W1 = \"+ str(parameters[\"W1\"]))\nprint (\"b1 = \"+ str(parameters[\"b1\"]))\nprint (\"W2 = \"+ str(parameters[\"W2\"]))\nprint (\"b2 = \"+ str(parameters[\"b2\"]))",
"W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]\n [-1.76569676 -0.80627147 0.51115557 -1.18258802]\n [-1.0535704 -0.86128581 0.68284052 2.20374577]]\nb1 = [[-0.04659241]\n [-1.28888275]\n [ 0.53405496]]\nW2 = [[-0.55569196 0.0354055 1.32964895]]\nb2 = [[-0.84610769]]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:100%\"> \n <tr>\n <td > W1 </td> \n <td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008]\n [-1.76569676 -0.80627147 0.51115557 -1.18258802]\n [-1.0535704 -0.86128581 0.68284052 2.20374577]] </td> \n </tr> \n \n <tr>\n <td > b1 </td> \n <td > [[-0.04659241]\n [-1.28888275]\n [ 0.53405496]] </td> \n </tr> \n <tr>\n <td > W2 </td> \n <td > [[-0.55569196 0.0354055 1.32964895]]</td> \n </tr> \n \n <tr>\n <td > b2 </td> \n <td > [[-0.84610769]] </td> \n </tr> \n</table>\n",
"_____no_output_____"
],
[
"\n## 7 - Conclusion\n\nCongrats on implementing all the functions required for building a deep neural network! \n\nWe know it was a long assignment but going forward it will only get better. The next part of the assignment is easier. \n\nIn the next assignment you will put all these together to build two models:\n- A two-layer neural network\n- An L-layer neural network\n\nYou will in fact use these models to classify cat vs non-cat images!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e751b1c3fa46761b3a7720367a5258562a64cd66 | 22,366 | ipynb | Jupyter Notebook | Assignment_5/kNN_logistic_stocks/.ipynb_checkpoints/KNN-checkpoint.ipynb | KyleLeePiupiupiu/CS677_Assignment | c38278e81f4e58cc6ef020fade2c075e9fc09bf7 | [
"MIT"
] | null | null | null | Assignment_5/kNN_logistic_stocks/.ipynb_checkpoints/KNN-checkpoint.ipynb | KyleLeePiupiupiu/CS677_Assignment | c38278e81f4e58cc6ef020fade2c075e9fc09bf7 | [
"MIT"
] | null | null | null | Assignment_5/kNN_logistic_stocks/.ipynb_checkpoints/KNN-checkpoint.ipynb | KyleLeePiupiupiu/CS677_Assignment | c38278e81f4e58cc6ef020fade2c075e9fc09bf7 | [
"MIT"
] | null | null | null | 77.391003 | 13,550 | 0.792766 | [
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix",
"_____no_output_____"
],
[
"df = pd.read_csv('./GOOGL_weekly_return_volatility.csv')\nyear1 = df[df.Year == 2019]\nyear2 = df[df.Year == 2020]",
"_____no_output_____"
],
[
"# year1 knn accuracy\nkList = [3,5,7,9,11]\naccuracy = []\nx = year1[['mean_return', 'volatility']]\ny = year1.label\nxTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size=0.4, random_state=0)\nfor k in kList:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(xTrain, yTrain)\n yPredict = knn.predict(xTest)\n accuracy.append(accuracy_score(yTest, yPredict))\n\nplt.plot(kList, accuracy)\nprint(accuracy)",
"[0.9090909090909091, 0.8636363636363636, 0.8181818181818182, 0.9090909090909091, 0.9090909090909091]\n"
],
[
"# Optimal k is 3,9,11\nkList = [3,9,11]\naccuracy = []\nx = year1[['mean_return', 'volatility']]\ny = year1.label\nxTest = year2[['mean_return', 'volatility']]\nyTest = year2.label\nfor k in kList:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x, y)\n yPredict = knn.predict(xTest)\n print('k = {}, accuracy = {}'.format(k, accuracy_score(yTest, yPredict)))\n\n\n# Confusion Matrix I choose k = 3\nknn = KNeighborsClassifier(n_neighbors=3)\nknn.fit(xTrain, yTrain)\nlabelYear2 = knn.predict(xTest)\n\ntemp = confusion_matrix(yTest, labelYear2)\nprint(temp)\n\ntpr = temp[0][0] / (temp[0][0] + temp[0][1])\ntnr = temp[1][1] / (temp[0][1] + temp[1][1])\nprint('TPR = {}, TNR = {}'.format(tpr, tnr))\n\n",
"k = 3, accuracy = 0.8490566037735849\nk = 9, accuracy = 0.7547169811320755\nk = 11, accuracy = 0.7547169811320755\n[[20 5]\n [ 9 19]]\nTPR = 0.8, TNR = 0.7916666666666666\n"
]
],
[
[
"# Strategy Based on Labels vs BH",
"_____no_output_____"
]
],
[
[
"dfDetail = pd.read_csv(\"./GOOGL_weekly_return_volatility_detailed.csv\")\ndfYear2 = dfDetail[dfDetail.Year == 2020]\nyear2.label = labelYear2\n\n## Add label to detail\nlabelMap = {}\nfor (y, w, l) in zip(year2.Year, year2.Week_Number, year2.label):\n key = (y, w)\n value = l\n labelMap[key] = value\n\ntemp = []\nfor (y, w) in zip(dfYear2.Year, dfYear2.Week_Number):\n key = (y, w)\n temp.append(labelMap[key])\n\n## Extract data\ndfYear2['Label'] = temp\ndfYear2 = dfYear2[['Year', 'Week_Number', 'Close', 'Label']]\n\n## Cut goo2020\ngoo2020Week = []\nfor i in range(53):\n temp = dfYear2[dfYear2.Week_Number == i]\n temp = temp.reset_index(drop=True)\n goo2020Week.append(temp)\n\n",
"C:\\Users\\Lee\\anaconda3\\lib\\site-packages\\pandas\\core\\generic.py:5494: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self[name] = value\n<ipython-input-88-045c94317a87>:18: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n dfYear2['Label'] = temp\n"
],
[
"def proficCalculator(data, fund):\n # Week 0 case\n week1Data = data[0]\n week1Label = week1Data.Label[0] # week 0 label\n\n if week1Label == 1:\n stock = True\n buyPrice = week1Data.Close[0] # week 0 first day price\n sellPrice = week1Data.Close[len(week1Data)-1] # week 0 last day price\n else:\n stock = False\n buyPrice = week1Data.Close[len(week1Data)-1] # week 0 last day price\n sellPrice = week1Data.Close[len(week1Data)-1] # week 0 last day price\n\n\n for df in data[1:]:\n nextWeekColor = df.Label[0]\n nextClosePrice = df.Close[len(df)-1]\n\n # stock + green = no action\n if (stock == True) and (nextWeekColor == 1):\n stock == True # Keep holding the stock\n buyPrice = buyPrice # Buy point stay\n sellPrice = nextClosePrice # Sell point move forward\n\n # stock + red = sell\n elif (stock == True) and (nextWeekColor == 0):\n r = 1 + (sellPrice - buyPrice) / sellPrice\n fund = fund * r\n buyPrice = nextClosePrice\n sellPrice = nextClosePrice\n stock = False\n \n # money + green = buy stock\n elif (stock == False) and (nextWeekColor == 1):\n buyPrice = buyPrice\n sellPrice = nextClosePrice\n stock = True\n # money + red = no action\n elif (stock == False) and (nextWeekColor == 0):\n buyPrice = nextClosePrice\n sellPrice = nextClosePrice\n stock = False\n\n # Last withdraw\n r = 1 + (sellPrice - buyPrice) / sellPrice\n fund = fund * r\n return fund\n\n\nif __name__ == \"__main__\":\n # Trading base on my label\n total = proficCalculator(goo2020Week, 100)\n print(\"Using Label: {}\".format(total))\n\n # Buy and hold \n first = goo2020Week[0]\n first = first.Close[0]\n\n last = goo2020Week[-1]\n last = last.Close[len(last)-1]\n\n r = 1 + (last - first) / last\n total = 100 * r\n print(\"Buy on first day and Sell on last day: {}\".format(total))",
"Using Label: 215.85697376702737\nBuy on first day and Sell on last day: 121.17033527942765\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e751bea69a954a60b272a9bbdea83edee39be16d | 4,508 | ipynb | Jupyter Notebook | examples/ch02/snippets_ipynb/02_03.ipynb | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 4 | 2019-05-04T00:33:25.000Z | 2021-05-29T20:37:59.000Z | examples/ch02/snippets_ipynb/02_03.ipynb | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | null | null | null | examples/ch02/snippets_ipynb/02_03.ipynb | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 3 | 2020-05-05T13:14:28.000Z | 2022-02-03T16:18:37.000Z | 19.431034 | 83 | 0.454969 | [
[
[
"# 2.3 Arithmetic",
"_____no_output_____"
],
[
"### Multiplication (`*`)",
"_____no_output_____"
]
],
[
[
"7 * 4",
"_____no_output_____"
]
],
[
[
"### Exponentiation (`**`)",
"_____no_output_____"
]
],
[
[
"2 ** 10",
"_____no_output_____"
],
[
"9 ** (1 / 2)",
"_____no_output_____"
]
],
[
[
"### True Division (`/`) vs. Floor Division (`//`)",
"_____no_output_____"
]
],
[
[
"7 / 4",
"_____no_output_____"
],
[
"7 // 4",
"_____no_output_____"
],
[
"3 // 5",
"_____no_output_____"
],
[
"14 // 7",
"_____no_output_____"
],
[
"-13 / 4",
"_____no_output_____"
],
[
"-13 // 4",
"_____no_output_____"
]
],
[
[
"### Exceptions and Tracebacks",
"_____no_output_____"
]
],
[
[
"123 / 0",
"_____no_output_____"
],
[
"z + 7",
"_____no_output_____"
]
],
[
[
"### Remainder Operator",
"_____no_output_____"
]
],
[
[
"17 % 5",
"_____no_output_____"
],
[
"7.5 % 3.5",
"_____no_output_____"
]
],
[
[
"### Grouping Expressions with Parentheses",
"_____no_output_____"
]
],
[
[
"10 * (5 + 3)",
"_____no_output_____"
],
[
"10 * 5 + 3",
"_____no_output_____"
],
[
"##########################################################################\n# (C) Copyright 2019 by Deitel & Associates, Inc. and #\n# Pearson Education, Inc. All Rights Reserved. #\n# #\n# DISCLAIMER: The authors and publisher of this book have used their #\n# best efforts in preparing the book. These efforts include the #\n# development, research, and testing of the theories and programs #\n# to determine their effectiveness. The authors and publisher make #\n# no warranty of any kind, expressed or implied, with regard to these #\n# programs or to the documentation contained in these books. The authors #\n# and publisher shall not be liable in any event for incidental or #\n# consequential damages in connection with, or arising out of, the #\n# furnishing, performance, or use of these programs. #\n##########################################################################\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e751bfee2396248d690f91322ec5b3f64993e2c3 | 202,927 | ipynb | Jupyter Notebook | courses/modsim2018/tasks/Desiree/Task11_MotorControl.ipynb | desireemiraldo/bmc | 5e50d806ea2e2d7494035ba120c3cd3a620a156a | [
"MIT"
] | null | null | null | courses/modsim2018/tasks/Desiree/Task11_MotorControl.ipynb | desireemiraldo/bmc | 5e50d806ea2e2d7494035ba120c3cd3a620a156a | [
"MIT"
] | null | null | null | courses/modsim2018/tasks/Desiree/Task11_MotorControl.ipynb | desireemiraldo/bmc | 5e50d806ea2e2d7494035ba120c3cd3a620a156a | [
"MIT"
] | null | null | null | 70.978314 | 32,569 | 0.687168 | [
[
[
"# Task 11 - Motor Control\n### Introduction to modeling and simulation of human movement\nhttps://github.com/BMClab/bmc/blob/master/courses/ModSim2018.md",
"_____no_output_____"
],
[
"* Task (for Lecture 11):\n\nChange the derivative of the contractile element length function. The new function must compute the derivative according to the article from Thelen(2003) (Eq. (6) and (7)):\n\n Thelen D; Adjustment of muscle mechanics model parameters to simulate dynamic contractions in older adults (2003)",
"_____no_output_____"
]
],
[
[
"import numpy as np\n#import pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\n%matplotlib notebook",
"_____no_output_____"
]
],
[
[
"### Muscle properties",
"_____no_output_____"
]
],
[
[
"Lslack = .223\nUmax = .04\nLce_o = .093 #optimal length\nwidth = .63#*Lce_o\nFmax = 3000\na = 1\n#b = .25*10#*Lce_o ",
"_____no_output_____"
]
],
[
[
"### Initial conditions",
"_____no_output_____"
]
],
[
[
"Lnorm_ce = .087/Lce_o #norm\nt0 = 0\ntf = 2.99\nh = 1e-3",
"_____no_output_____"
],
[
"t = np.arange(t0,tf,h)\nF = np.empty(t.shape)\nFkpe = np.empty(t.shape)\nFiberLen = np.empty(t.shape)\nTendonLen = np.empty(t.shape)",
"_____no_output_____"
]
],
[
[
"## Simulation - Series",
"_____no_output_____"
],
[
"for i in range (len(t)):\n #ramp\n if t[i]<=1:\n Lm = 0.31\n elif t[i]>1 and t[i]<2:\n Lm = .31 + .1*(t[i]-1)\n #print(Lm)\n \n #shortening at 4cm/s\n Lsee = Lm - Lce\n \n if Lsee<Lslack: \n F[i] = 0\n else: \n F[i] = Fmax*((Lsee-Lslack)/(Umax*Lslack))**2\n \n \n #isometric force at Lce from CE force length relationship\n F0 = max([0, Fmax*(1-((Lce-Lce_o)/width)**2)])\n \n #calculate CE velocity from Hill's equation\n if F[i]>F0: print('Error: cannot do eccentric contractions')\n \n Lcedot = -b*(F0-F[i])/(F[i]+a) #vel is negative for shortening\n \n # --- Euler integration step\n Lce += h*Lcedot\n\n ",
"_____no_output_____"
]
],
[
[
"def TendonForce (Lnorm_see,Lslack, Lce_o):\n '''\n Compute tendon force\n\n Inputs:\n Lnorm_see = normalized tendon length\n Lslack = slack length of the tendon (non-normalized)\n Lce_o = optimal length of the fiber\n \n Output:\n Fnorm_tendon = normalized tendon force\n \n '''\n Umax = .04\n \n if Lnorm_see<Lslack/Lce_o: \n Fnorm_tendon = 0\n else: \n Fnorm_tendon = ((Lnorm_see-Lslack/Lce_o)/(Umax*Lslack/Lce_o))**2\n \n return Fnorm_tendon",
"_____no_output_____"
],
[
"def ParallelElementForce (Lnorm_ce):\n '''\n Compute parallel element force\n \n Inputs:\n Lnorm_ce = normalized contractile element length\n \n Output:\n Fnorm_kpe = normalized parallel element force\n\n '''\n Umax = 1\n \n if Lnorm_ce< 1: \n Fnorm_kpe = 0\n else: \n Fnorm_kpe = ((Lnorm_ce-1)/(Umax*1))**2 \n \n return Fnorm_kpe",
"_____no_output_____"
],
[
"def ForceLengthCurve (Lnorm_ce,width):\n    # isometric force-length relationship: normalized force produced at the normalized CE length Lnorm_ce\n    F0 = max([0, (1-((Lnorm_ce-1)/width)**2)])\n    return F0",
"_____no_output_____"
]
],
[
[
"def ContractileElementDot(F0, Fnorm_CE, a, b):\n \n '''\n Compute Contractile Element Derivative\n\n Inputs:\n F0 = Force-Length Curve\n Fce = Contractile element force\n \n Output:\n Lnorm_cedot = normalized contractile element length derivative\n\n '''\n \n if Fnorm_CE>F0: print('Error: cannot do eccentric contractions')\n \n Lnorm_cedot = -b*(F0-Fnorm_CE)/(Fnorm_CE + a) #vel is negative for shortening\n \n return Lnorm_cedot",
"_____no_output_____"
]
],
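[
[
"For reference, the updated `ContractileElementDot` in the next cell computes the normalized contractile element velocity as summarized below (this is a restatement of the code, not a quotation of the paper; see Thelen (2003), Eq. (6) and (7), for the published form). Here $F_0$ is the isometric force from the force-length curve, $\\tilde{F}_{CE}$ the contractile element force, $a$ the activation, $V_{max}$ the maximum contraction velocity and $F^{M}_{len}$ the maximum normalized force during lengthening:\n\n$$\\dot{L}^{norm}_{CE} = (0.25 + 0.75\\,a)\\,V_{max}\\,\\frac{\\tilde{F}_{CE} - F_0}{b}, \\qquad b = \\begin{cases} F_0 + \\tilde{F}_{CE}/(F_0\\,L_{CE}^{opt}) & \\tilde{F}_{CE} \\le F_0 \\\\ \\frac{(2 + 2/(F_0\\,L_{CE}^{opt}))(F_0\\,F^{M}_{len} - \\tilde{F}_{CE})}{F^{M}_{len} - 1} & \\tilde{F}_{CE} > F_0 \\end{cases}$$",
"_____no_output_____"
]
],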
[
[
"def ContractileElementDot(F0, Fnorm_CE, a):\n \n '''\n Compute Contractile Element Derivative\n\n Inputs:\n F0 = Force-Length Curve\n Fce = Contractile element force\n \n Output:\n Lnorm_cedot = normalized contractile element length derivative\n\n '''\n \n FMlen = 1.4 # young adults\n Vmax = 10 # young adults\n \n if Fnorm_CE > F0:\n \n b = ((2 + 2/(F0*Lce_o))*(F0*FMlen - Fnorm_CE))/(FMlen-1)\n \n elif Fnorm_CE <= F0:\n \n b = F0 + Fnorm_CE/(F0*Lce_o)\n \n Lnorm_cedot = (.25 + .75*a)*Vmax*((Fnorm_CE - F0)/b)\n \n return Lnorm_cedot",
"_____no_output_____"
],
[
"def ContractileElementForce(Fnorm_tendon,Fnorm_kpe):\n '''\n Compute Contractile Element force\n\n Inputs:\n Fnorm_tendon = normalized tendon force\n Fnorm_kpe = normalized parallel element force\n \n Output:\n Fnorm_CE = normalized contractile element force\n '''\n Fnorm_CE = Fnorm_tendon - Fnorm_kpe\n return Fnorm_CE",
"_____no_output_____"
],
[
"def tendonLength(Lm,Lce_o,Lnorm_ce):\n    '''\n    Compute tendon length\n    \n    Inputs:\n    Lm = total muscle-tendon unit length (non-normalized)\n    Lce_o = optimal length of the fiber\n    Lnorm_ce = normalized contractile element length\n    \n    Output:\n    Lnorm_see = normalized tendon length \n    '''\n    Lnorm_see = Lm/Lce_o - Lnorm_ce\n    return Lnorm_see",
"_____no_output_____"
],
[
"def activation(a,u,dt):\n    '''\n    Compute muscle activation dynamics (first-order ODE integrated with an Euler step)\n    \n    Inputs:\n    a = current activation\n    u = excitation\n    dt = integration time step (s)\n    \n    Output:\n    a = updated activation\n    '''\n    \n    tau_deact = 50e-3 #young adults\n    tau_act = 15e-3\n    \n    if u>a:\n        tau_a = tau_act*(0.5+1.5*a)\n    elif u <=a:\n        tau_a = tau_deact/(0.5+1.5*a)\n    \n    dadt = (u-a)/tau_a # activation rate\n    a = a + dadt*dt    # solve the differential equation with an Euler step\n\n    return a",
"_____no_output_____"
]
],
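[
[
"A quick sketch (not part of the original task) of how the activation state defined above evolves for a made-up constant excitation, reusing the Euler update inside `activation`:\n\n```python\ndt_demo = 1e-3\na_demo = 0.0\nu_demo = 1.0              # made-up constant excitation\nfor step in range(200):   # 0.2 s of simulated time\n    a_demo = activation(a_demo, u_demo, dt_demo)\nprint(a_demo)             # converges towards u_demo\n```",
"_____no_output_____"
]
],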
[
[
"## Simulation - Parallel",
"_____no_output_____"
]
],
[
[
"#Normalizing\n\nu = 1 # neural excitation: assumed constant here, the value was not specified in the original notebook\n\nfor i in range (len(t)):\n    #ramp\n    if t[i]<=1:\n        Lm = 0.31\n    elif t[i]>1 and t[i]<2:\n        Lm = .31 - .04*(t[i]-1)\n    #print(Lm)\n    \n    #shortening at 4cm/s\n    \n    Lnorm_see = tendonLength(Lm,Lce_o,Lnorm_ce)\n\n    Fnorm_tendon = TendonForce (Lnorm_see,Lslack, Lce_o) \n    \n    Fnorm_kpe = ParallelElementForce (Lnorm_ce) \n    \n    #isometric force at Lce from CE force length relationship\n    F0 = ForceLengthCurve (Lnorm_ce,width)\n    \n    Fnorm_CE = ContractileElementForce(Fnorm_tendon,Fnorm_kpe) #Fnorm_CE = ~Fm\n    \n    #computing activation\n    a = activation(a,u,h) # use the integration step h defined in the initial conditions\n    \n    #calculate CE velocity from Hill's equation \n    Lnorm_cedot = ContractileElementDot(F0, Fnorm_CE,a)\n    \n    # --- Euler integration step\n    Lnorm_ce += h*Lnorm_cedot\n\n    \n    F[i] = Fnorm_tendon*Fmax\n    Fkpe[i] = Fnorm_kpe*Fmax\n    FiberLen[i] = Lnorm_ce*Lce_o\n    TendonLen[i] = Lnorm_see*Lce_o\n    ",
"_____no_output_____"
]
],
[
[
"## Plots ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1, 1, figsize=(6,6), sharex=True)\n\nax.plot(t,F,c='red')\nplt.grid()\nplt.xlabel('time (s)')\nplt.ylabel('Force (N)')\n\n\nax.legend()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1, figsize=(6,6), sharex=True)\n\nax.plot(t,FiberLen, label = 'fiber')\nax.plot(t,TendonLen, label = 'tendon')\nplt.grid()\nplt.xlabel('time (s)')\nplt.ylabel('Length (m)')\nax.legend(loc='best')\n\n\nfig, ax = plt.subplots(1, 3, figsize=(12,4), sharex=True, sharey=True)\nax[0].plot(t,FiberLen, label = 'fiber')\nax[1].plot(t,TendonLen, label = 'tendon')\nax[2].plot(t,FiberLen + TendonLen, label = 'muscle (tendon + fiber)')\n\nax[1].set_xlabel('time (s)')\nax[0].set_ylabel('Length (m)')\n#plt.legend(loc='best')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e751d691a5f8a235a53e223caeb9cf61fd8cb3a7 | 37,235 | ipynb | Jupyter Notebook | dataset_preparation_noise.ipynb | xaviermouy/minke-whale-dataset | 46e1721569947574a46d4dee0a5f9b30a7200d40 | [
"Apache-2.0"
] | null | null | null | dataset_preparation_noise.ipynb | xaviermouy/minke-whale-dataset | 46e1721569947574a46d4dee0a5f9b30a7200d40 | [
"Apache-2.0"
] | 3 | 2022-01-12T15:10:45.000Z | 2022-02-04T14:09:53.000Z | dataset_preparation_noise.ipynb | xaviermouy/minke-whale-dataset | 46e1721569947574a46d4dee0a5f9b30a7200d40 | [
"Apache-2.0"
] | null | null | null | 30.545529 | 246 | 0.524211 | [
[
[
"# Creation of annotations from unannotated noise recordings\n\n\n## Purpose of this notebook\nThis notebook describes the steps involved in automatically creating noise annotations from non-annotated noise recordings. This notebook is used for creating noise annotations from data provided by the University of Aberdeen in Scotland.\n\nAnnotations are made by breaking down the recording into adjacent annotations of a given duration until the end of the file. Min and max frequency of the annotations are 0 Hz and the Nyquist frequency, respectively.\n\n## Deployment folders\n\nThe data provided were separated into folders corresponding to different deployments. As a result, 7 folders were created:\n\n- UK-UAberdeen-MorayFirth-201904_986-110\n- UK-UAberdeen-MorayFirth-201904_1027-235\n- UK-UAberdeen-MorayFirth-201904_1029-237\n- UK-UAberdeen-MorayFirth-202001_1092-112\n- UK-UAberdeen-MorayFirth-202001_1093-164\n- UK-UAberdeen-MorayFirth-202101_1136-164\n- UK-UAberdeen-MorayFirth-202101_1137-112\n\nA deployment_info.csv file was created in each of these folders and contains the metadata for each deployment.\n\n\n",
"_____no_output_____"
],
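[
"A minimal sketch of the time segmentation described above, using a made-up file duration: adjacent fixed-length bins are created, and a too-short last bin is merged into the previous one so every annotation lasts at least `annot_dur_sec`:\n\n```python\nimport numpy as np\n\nannot_dur_sec = 60\nfile_dur = 250            # made-up file duration in seconds\n\nt1 = np.arange(0, file_dur, annot_dur_sec)\nt2 = np.append(t1[1:], file_dur)\nif t2[-1] - t1[-1] < annot_dur_sec:   # merge a too-short last bin\n    t1 = np.delete(t1, -1)\n    t2 = np.delete(t2, -2)\nprint(list(zip(t1, t2)))  # [(0, 60), (60, 120), (120, 180), (180, 250)]\n```",
"_____no_output_____"
],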
[
"## Import libraries and define functions used throughout",
"_____no_output_____"
]
],
[
[
"from ecosound.core.annotation import Annotation\nfrom ecosound.core.metadata import DeploymentInfo\nfrom ecosound.core.audiotools import Sound\nfrom ecosound.core.tools import filename_to_datetime\nimport os\nimport pandas as pd\nimport numpy as np\nimport uuid\nfrom datetime import datetime\n\ndef create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass):\n files_list = os.listdir(audio_dir)\n annot_stack = []\n for file in files_list:\n if file.endswith(file_ext):\n print(file)\n # retrieve file start date and time\n file_timestamp = filename_to_datetime(file)\n\n # retrieve file duration\n audio = Sound(os.path.join(audio_dir, file))\n file_dur = audio.file_duration_sec\n\n # define annotations start times (relative to start begining of the audio file)\n t1 = np.arange(0, file_dur, annot_dur_sec)\n t2 = t1[1:]\n t2 = np.append(t2, file_dur)\n # makes sure the last annotation is longer than value defined by the user (annot_dur_sec)\n if t2[-1]-t1[-1] < annot_dur_sec:\n #print(t1)\n #print(t2)\n t1 = np.delete(t1, -1)\n t2 = np.delete(t2, -2)\n #print(t1)\n #print(t2)\n\n # create the annotatiom object\n annot = Annotation()\n\n annot.data['time_min_offset'] = t1\n annot.data['time_max_offset'] = t2\n annot.insert_values(audio_file_start_date=file_timestamp[0])\n annot.data['time_min_date'] = pd.to_datetime(\n annot.data['audio_file_start_date'] + pd.to_timedelta(\n annot.data['time_min_offset'], unit='s'))\n annot.data['time_max_date'] = pd.to_datetime(\n annot.data['audio_file_start_date'] +\n pd.to_timedelta(annot.data['time_max_offset'], unit='s'))\n annot.insert_values(audio_channel=1)\n annot.insert_values(audio_file_name=os.path.splitext(os.path.basename(file))[0])\n annot.insert_values(audio_file_dir=audio_dir)\n annot.insert_values(audio_file_extension=os.path.splitext(file)[1])\n annot.insert_values(frequency_min=0)\n annot.insert_values(software_version=0)\n annot.insert_values(operator_name='xavier')\n annot.insert_values(entry_date=datetime.now())\n annot.insert_values(frequency_max=audio.file_sampling_frequency/2)\n annot.insert_values(label_class=label_class)\n annot.insert_values(label_subclass=label_subclass)\n annot.insert_values(from_detector=False)\n annot.insert_values(software_name='custom_python')\n annot.data['uuid'] = annot.data.apply(lambda _: str(uuid.uuid4()), axis=1)\n annot.data['duration'] = annot.data['time_max_offset'] - annot.data['time_min_offset'] \n # add metadata\n annot.insert_metadata(os.path.join(audio_dir, deployment_file)) \n # stack annotatiosn for each file\n annot_stack.append(annot)\n # check that evrything looks fine\n annot.check_integrity(verbose=False, ignore_frequency_duplicates=True)\n\n # concatenate all annotations\n annot_concat = annot_stack[0]\n for an_idx in range(1, len(annot_stack)):\n annot_concat = annot_concat + annot_stack[an_idx]\n annot_concat.check_integrity(verbose=False, ignore_frequency_duplicates=True)\n return annot_concat",
"_____no_output_____"
]
],
[
[
"### Dataset 1: UK-UAberdeen-MorayFirth-201904_986-110\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-201904_986-110'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = '' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl986_1678036995.190402110017.wav\nDepl986_1678036995.190406225930.wav\nDepl986_1678036995.190410165901.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"Here is what the annotations look like in Raven:\n\n\n",
"_____no_output_____"
],
[
"### Dataset 2: UK-UAberdeen-MorayFirth-201904_1027-235\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-201904_1027-235'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = '' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1027_1677725722.190403115956.wav\nDepl1027_1677725722.190411055855.wav\nDepl1027_1677725722.190415235822.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"### Dataset 3: UK-UAberdeen-MorayFirth-201904_1029-237\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-201904_1029-237'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = '' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1029_134541352.190403235927.wav\nDepl1029_134541352.190404175922.wav\nDepl1029_134541352.190409115847.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"### Dataset 4: UK-UAberdeen-MorayFirth-202001_1092-112 (seismic)\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-202001_1092-112'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = 'S' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1092_1678036995.200101014914.wav\nDepl1092_1678036995.200104224914.wav\nDepl1092_1678036995.200104234914.wav\nDepl1092_1678036995.200111084914.wav\nDepl1092_1678036995.200119004914.wav\nDepl1092_1678036995.200119034914.wav\nDepl1092_1678036995.200121014914.wav\nDepl1092_1678036995.200121214914.wav\nDepl1092_1678036995.200124014914.wav\nDepl1092_1678036995.200124164914.wav\nDepl1092_1678036995.200125184914.wav\nDepl1092_1678036995.200125214914.wav\nDepl1092_1678036995.200128064914.wav\nDepl1092_1678036995.200128134914.wav\nDepl1092_1678036995.200128144914.wav\nDepl1092_1678036995.200201214914.wav\nDepl1092_1678036995.200204224914.wav\nDepl1092_1678036995.200206004914.wav\nDepl1092_1678036995.200213004914.wav\nDepl1092_1678036995.200213024914.wav\nDepl1092_1678036995.200213084914.wav\nDepl1092_1678036995.200226104914.wav\nDepl1092_1678036995.200226124914.wav\nDepl1092_1678036995.200227004914.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"### Dataset 5: UK-UAberdeen-MorayFirth-202001_1093-164 (seismic)\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-202001_1093-164'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = 'S' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1093_1677725722.200104205913.wav\nDepl1093_1677725722.200110095913.wav\nDepl1093_1677725722.200110115913.wav\nDepl1093_1677725722.200111205913.wav\nDepl1093_1677725722.200119035913.wav\nDepl1093_1677725722.200121195913.wav\nDepl1093_1677725722.200121235913.wav\nDepl1093_1677725722.200123235913.wav\nDepl1093_1677725722.200124025913.wav\nDepl1093_1677725722.200124165913.wav\nDepl1093_1677725722.200126065913.wav\nDepl1093_1677725722.200126095913.wav\nDepl1093_1677725722.200128135913.wav\nDepl1093_1677725722.200130015913.wav\nDepl1093_1677725722.200131095913.wav\nDepl1093_1677725722.200201185913.wav\nDepl1093_1677725722.200202025913.wav\nDepl1093_1677725722.200204195913.wav\nDepl1093_1677725722.200205085913.wav\nDepl1093_1677725722.200205095913.wav\nDepl1093_1677725722.200205235913.wav\nDepl1093_1677725722.200206015913.wav\nDepl1093_1677725722.200213005913.wav\nDepl1093_1677725722.200213015913.wav\nDepl1093_1677725722.200226235913.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"### Dataset 6: UK-UAberdeen-MorayFirth-202101_1136-164\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-202101_1136-164'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = '' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1136_1677725722.210102130002.wav\nDepl1136_1677725722.210103230002.wav\nDepl1136_1677725722.210105030002.wav\nDepl1136_1677725722.210105110002.wav\nDepl1136_1677725722.210119110002.wav\nDepl1136_1677725722.210119180002.wav\nDepl1136_1677725722.210208180002.wav\nDepl1136_1677725722.210216140002.wav\nDepl1136_1677725722.210216170002.wav\nDepl1136_1677725722.210217150002.wav\nDepl1136_1677725722.210220090002.wav\nDepl1136_1677725722.210221010002.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
],
[
[
"### Dataset 7: UK-UAberdeen-MorayFirth-202101_1137-112\n\nDefinition of all the paths of all folders with the raw annotation and audio files for this deployment.",
"_____no_output_____"
]
],
[
[
"audio_dir = r'C:\\Users\\xavier.mouy\\Documents\\GitHub\\minke-whale-dataset\\datasets\\UK-UAberdeen-MorayFirth-202101_1137-112'\ndeployment_file = r'deployment_info.csv' \nfile_ext = 'wav'\n\nannot_dur_sec = 60 # duration of the noise annotations in seconds\nlabel_class = 'NN' # label to use for the noise class\nlabel_subclass = '' # label to use for the noise subclass (if needed, e.g. S for seismic airguns)",
"_____no_output_____"
]
],
[
[
"Now we can create annotations for all audio files in that folder.",
"_____no_output_____"
]
],
[
[
"annot = create_noise_annot(audio_dir, deployment_file, file_ext, annot_dur_sec, label_class, label_subclass)",
"Depl1137_1678508072.210107040002.wav\nDepl1137_1678508072.210108160002.wav\nDepl1137_1678508072.210113150002.wav\nDepl1137_1678508072.210114040002.wav\nDepl1137_1678508072.210116170002.wav\nDepl1137_1678508072.210119040002.wav\nDepl1137_1678508072.210122000002.wav\nDepl1137_1678508072.210123040002.wav\nDepl1137_1678508072.210123120002.wav\nDepl1137_1678508072.210208160002.wav\nDepl1137_1678508072.210211200002.wav\nDepl1137_1678508072.210213110002.wav\n"
]
],
[
[
"Let's look at the summary of annotations that were created:",
"_____no_output_____"
]
],
[
[
"annot.summary()",
"_____no_output_____"
]
],
[
[
"The dataset can now be saved as a Raven annotation file and netcdf4 file:",
"_____no_output_____"
]
],
[
[
"annot.to_netcdf(os.path.join(audio_dir, 'Annotations_dataset_' + annot.data['deployment_ID'][0] +' annotations.nc'))\nannot.to_raven(audio_dir, outfile='Annotations_dataset_' + annot.data['deployment_ID'][0] +'.Table.1.selections.txt', single_file=True)",
"_____no_output_____"
]
]
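Datasets 3 through 7 above repeat the same four steps, with only the dataset folder and the noise subclass label changing. Purely as an illustration (the `deployments` list below is assembled by hand from the cells above, and `create_noise_annot` is the helper defined earlier in this notebook), the whole sequence could be expressed as one loop:

```python
import os

base = r'C:\Users\xavier.mouy\Documents\GitHub\minke-whale-dataset\datasets'

# (dataset folder, noise subclass label) pairs taken from the per-dataset cells above
deployments = [
    ('UK-UAberdeen-MorayFirth-201904_1029-237', ''),
    ('UK-UAberdeen-MorayFirth-202001_1092-112', 'S'),
    ('UK-UAberdeen-MorayFirth-202001_1093-164', 'S'),
    ('UK-UAberdeen-MorayFirth-202101_1136-164', ''),
    ('UK-UAberdeen-MorayFirth-202101_1137-112', ''),
]

for folder, label_subclass in deployments:
    audio_dir = os.path.join(base, folder)
    # 60 s noise ('NN') annotations for every wav file in the deployment folder
    annot = create_noise_annot(audio_dir, 'deployment_info.csv', 'wav', 60, 'NN', label_subclass)
    prefix = 'Annotations_dataset_' + annot.data['deployment_ID'][0]
    annot.to_netcdf(os.path.join(audio_dir, prefix + ' annotations.nc'))
    annot.to_raven(audio_dir, outfile=prefix + '.Table.1.selections.txt', single_file=True)
```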
] | [
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e751dbd87e70ac5fbdb3a67abd090cfa6ce810c3 | 14,395 | ipynb | Jupyter Notebook | Neural Networks/03_Deep_Net_in_tensorflow_mnist_classification.ipynb | kishore145/AI-ML-Foundations | 87fa76113e10623a8ae552b27dd7bdf53f8a4b20 | [
"MIT"
] | 1 | 2020-06-11T08:20:46.000Z | 2020-06-11T08:20:46.000Z | Neural Networks/03_Deep_Net_in_tensorflow_mnist_classification.ipynb | kishore145/AI-ML-Foundations | 87fa76113e10623a8ae552b27dd7bdf53f8a4b20 | [
"MIT"
] | null | null | null | Neural Networks/03_Deep_Net_in_tensorflow_mnist_classification.ipynb | kishore145/AI-ML-Foundations | 87fa76113e10623a8ae552b27dd7bdf53f8a4b20 | [
"MIT"
] | null | null | null | 40.209497 | 291 | 0.480584 | [
[
[
"<a href=\"https://colab.research.google.com/github/kishore145/AI-ML-Foundations/blob/master/Neural%20Networks/03_Deep_Net_in_tensorflow_mnist_classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Dropout\n\nfrom tensorflow.keras.utils import to_categorical",
"_____no_output_____"
],
[
"# Load mnist data set into train and test splits\n(X_train, y_train), (X_valid, y_valid) = mnist.load_data()",
"_____no_output_____"
],
[
"# Preprocess Data - Same steps as followed in shallow network\nX_train = X_train.reshape(60000, 784).astype('float32')\nX_valid = X_valid.reshape(10000, 784).astype('float32')\n\nX_train /= 255\nX_valid /= 255\n\nn_classes = 10\ny_train = to_categorical(y_train, n_classes)\ny_valid = to_categorical(y_valid, n_classes)",
"_____no_output_____"
],
[
"# Model Designing - For deep neuaral network, general rule of thumb is : \n# 1 i/p layer, 1 o/p layer and 3 or more hidden layers\n\n# Create a sequntial Model\nmodel = Sequential()\n\n# Batch normalization helps normalize the o/p a from one hidden layer to next\n# It works by normalizing z value (w.x + b) prior to running activation function\n# It is similar to input scaling performed on input vector X\n# Key difference is z is not normalized with mean 0 & std dev of 1 like i/p vectors, \n# It includes 2 learnable parameters beta and gamma which shifts the mean and variance of z\n\n# Add first hidden layers with batch normalization\nmodel.add(Dense(64, activation = 'relu', input_shape = (784,)))\nmodel.add(BatchNormalization())\n\n# Add second hidden layer with batch normalization\nmodel.add(Dense(64, activation='relu'))\nmodel.add(BatchNormalization())\n\n# Add third hiddden layer with batch normalization and 20% Dropout\n# Dropout prevent overfitting by dropping certain percentage of neurons in each run\nmodel.add(Dense(64, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(.2)) \n\n# Add o/p softmax layer of 10 neurons\nmodel.add(Dense(10, activation = 'softmax'))\n\n",
"_____no_output_____"
],
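As a rough, standalone illustration of what the `BatchNormalization` layers above compute (the toy `z` values, the epsilon, and the initial `gamma`/`beta` below are illustrative, not taken from the model):

```python
import numpy as np

z = np.array([[0.5, -1.2], [2.0, 0.3], [-0.7, 1.1]])  # toy pre-activations, shape (batch, units)

mean = z.mean(axis=0)
var = z.var(axis=0)
z_hat = (z - mean) / np.sqrt(var + 1e-3)  # normalize each unit across the batch

gamma, beta = 1.0, 0.0                    # learnable scale and shift (shown at their initial values)
z_bn = gamma * z_hat + beta
print(z_bn.mean(axis=0), z_bn.var(axis=0))  # ~0 mean and ~1 variance before gamma/beta adapt
```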
[
"# Compile / Configure model\nmodel.compile(optimizer = 'nadam', loss = 'categorical_crossentropy', metrics = ['accuracy'])",
"_____no_output_____"
],
[
"# Review model summary\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 64) 50240 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 64) 256 \n_________________________________________________________________\ndense_1 (Dense) (None, 64) 4160 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 64) 256 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 4160 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 64) 256 \n_________________________________________________________________\ndropout (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 59,978\nTrainable params: 59,594\nNon-trainable params: 384\n_________________________________________________________________\n"
],
[
"# Fitting the model\nmodel.fit(x=X_train, y= y_train, batch_size=128, epochs = 20, verbose = 1, validation_data=(X_valid, y_valid))",
"Epoch 1/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.3968 - accuracy: 0.8821 - val_loss: 0.1544 - val_accuracy: 0.9520\nEpoch 2/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.1527 - accuracy: 0.9540 - val_loss: 0.1155 - val_accuracy: 0.9640\nEpoch 3/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.1092 - accuracy: 0.9670 - val_loss: 0.0977 - val_accuracy: 0.9686\nEpoch 4/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0875 - accuracy: 0.9733 - val_loss: 0.1023 - val_accuracy: 0.9685\nEpoch 5/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0749 - accuracy: 0.9767 - val_loss: 0.0882 - val_accuracy: 0.9730\nEpoch 6/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0633 - accuracy: 0.9792 - val_loss: 0.0937 - val_accuracy: 0.9715\nEpoch 7/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0573 - accuracy: 0.9816 - val_loss: 0.0896 - val_accuracy: 0.9722\nEpoch 8/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0501 - accuracy: 0.9841 - val_loss: 0.0940 - val_accuracy: 0.9709\nEpoch 9/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0483 - accuracy: 0.9837 - val_loss: 0.0863 - val_accuracy: 0.9753\nEpoch 10/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0426 - accuracy: 0.9856 - val_loss: 0.0981 - val_accuracy: 0.9740\nEpoch 11/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0377 - accuracy: 0.9876 - val_loss: 0.0937 - val_accuracy: 0.9727\nEpoch 12/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0352 - accuracy: 0.9883 - val_loss: 0.0957 - val_accuracy: 0.9728\nEpoch 13/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0326 - accuracy: 0.9890 - val_loss: 0.0890 - val_accuracy: 0.9751\nEpoch 14/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0320 - accuracy: 0.9893 - val_loss: 0.0973 - val_accuracy: 0.9753\nEpoch 15/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0287 - accuracy: 0.9903 - val_loss: 0.0896 - val_accuracy: 0.9761\nEpoch 16/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0280 - accuracy: 0.9903 - val_loss: 0.0909 - val_accuracy: 0.9763\nEpoch 17/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0258 - accuracy: 0.9912 - val_loss: 0.0866 - val_accuracy: 0.9761\nEpoch 18/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0253 - accuracy: 0.9911 - val_loss: 0.0913 - val_accuracy: 0.9759\nEpoch 19/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0227 - accuracy: 0.9924 - val_loss: 0.0902 - val_accuracy: 0.9764\nEpoch 20/20\n469/469 [==============================] - 2s 5ms/step - loss: 0.0223 - accuracy: 0.9921 - val_loss: 0.0893 - val_accuracy: 0.9777\n"
],
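`model.fit` returns a `History` object holding the per-epoch metrics printed above. One possible way to inspect them, assuming matplotlib is available (note that calling `fit` again continues training the already-fitted model; capturing the original call as `history = model.fit(...)` avoids that):

```python
import matplotlib.pyplot as plt

history = model.fit(x=X_train, y=y_train, batch_size=128, epochs=20,
                    verbose=0, validation_data=(X_valid, y_valid))

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('categorical cross-entropy')
plt.legend()
plt.show()
```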
[
"# Evaluate model performance\nmodel.evaluate(X_valid, y_valid)",
"313/313 [==============================] - 0s 1ms/step - loss: 0.0893 - accuracy: 0.9777\n"
],
[
"# Importing numpy\nimport numpy as np\n\n# Performing a sample prediction\nX_valid_0 = X_valid[0].reshape(1,784)\ny_pred = model.predict(X_valid_0)\n\n# Print results\nprint(f'Actual y_valid[0] : {y_valid[0]}')\nprint(f'Taking raw np.argmax (Index) : {np.argmax(y_pred)}')\nprint(f'Taking np.argmax(predict, axis = -1): {np.argmax(model.predict(X_valid_0), axis=-1)}')\nprint(f'Raw prediction using predict method : {[round(i,0) for i in y_pred[0]]}') # Predict method expects an array of i/p\n#print(f'Using Pred class : {model.predict_classes(X_valid_0)}') #-- Deprecated",
"Actual y_valid[0] : [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]\nTaking raw np.argmax (Index) : 7\nTaking np.argmax(predict, axis = -1): [7]\nRaw prediction using predict method : [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]\n"
],
[
"# Concludes basic deep neural network architecture for classification problem based on JonKrohn's lectures. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e751fd19971008c8b6b59a2867f37959fb8650c3 | 18,437 | ipynb | Jupyter Notebook | prototyping/auto-segmentation/sb/05-mini-data-take-2/072-score-with-scharr.ipynb | dg1an3/pymedphys | bdca9c783aae8b5e1f231e6cb0bc69895e4b9329 | [
"Apache-2.0"
] | 2 | 2020-02-04T03:21:20.000Z | 2020-04-11T14:17:53.000Z | prototyping/auto-segmentation/sb/05-mini-data-take-2/072-score-with-scharr.ipynb | SimonBiggs/pymedphys | 83f02eac6549ac155c6963e0a8d1f9284359b652 | [
"Apache-2.0"
] | null | null | null | prototyping/auto-segmentation/sb/05-mini-data-take-2/072-score-with-scharr.ipynb | SimonBiggs/pymedphys | 83f02eac6549ac155c6963e0a8d1f9284359b652 | [
"Apache-2.0"
] | null | null | null | 25.642559 | 176 | 0.532733 | [
[
[
"# CT scan UNet demo\n\nThis notebook creates a UNet for a minified dataset of animal CTs.\n\nIf you are on Google Colab, make this train quicker by swapping to a GPU runtime. This is done by clicking `Runtime`, then `Change runtime type`, then selecting `GPU`:\n\n\n\n",
"_____no_output_____"
]
],
[
[
"import pathlib\nimport urllib.request\nimport shutil\nimport collections\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport imageio\nimport skimage.filters",
"_____no_output_____"
],
[
"zip_url = 'https://zenodo.org/record/4448689/files/minified-animal-patient-brain-orbits.zip?download=1'\nzip_filepath = 'data.zip'\n\ndata_directory = pathlib.Path('data')\n\nif not data_directory.exists():\n urllib.request.urlretrieve(zip_url, zip_filepath)\n shutil.unpack_archive(zip_filepath, data_directory)",
"_____no_output_____"
],
[
"dataset_types = [path.name for path in data_directory.glob('*') if path.is_dir()]\ndataset_types",
"_____no_output_____"
],
[
"def _load_image(image_path):\n png_image = imageio.imread(image_path)\n normalised_image = png_image[:,:,None] / 255\n \n return normalised_image\n\n\ndef _load_mask(mask_path):\n png_mask = imageio.imread(mask_path)\n normalised_mask = png_mask / 255\n \n return normalised_mask",
"_____no_output_____"
],
[
"def load_dataset_type(dataset_type, shuffle=True):\n image_suffix = '_image.png'\n mask_suffix = '_mask.png'\n \n image_paths = list(data_directory.joinpath(dataset_type).glob(f'**/*{image_suffix}'))\n if shuffle:\n np.random.shuffle(image_paths)\n \n mask_paths = [\n path.parent / path.name.replace(image_suffix, mask_suffix)\n for path in image_paths\n ]\n \n image_arrays = [\n _load_image(image_path)\n for image_path in image_paths\n ]\n mask_arrays = [\n _load_mask(mask_path)\n for mask_path in mask_paths\n ]\n \n images = np.array(image_arrays)\n masks = np.array(mask_arrays)\n \n return images, masks",
"_____no_output_____"
],
[
"training_images, training_masks = load_dataset_type('training')\nvalidation_images, validation_masks = load_dataset_type('validation', shuffle=False)",
"_____no_output_____"
],
[
"def _find_image_with_most_variety(images, masks):\n has_brain = np.sum(masks[:,:,:,1], axis=(1,2))\n has_eyes = np.sum(masks[:,:,:,0], axis=(1,2))\n\n brain_sort = 1 - np.argsort(has_brain) / len(has_brain)\n eyes_sort = 1 - np.argsort(has_eyes) / len(has_eyes)\n\n max_combo = np.argmax(brain_sort * eyes_sort * has_brain * has_eyes)\n\n sample_image = images[max_combo,:,:,:]\n sample_mask = masks[max_combo,:,:,:]\n \n return sample_image, sample_mask\n\n\nsample_image, sample_mask = _find_image_with_most_variety(\n validation_images, validation_masks\n)",
"_____no_output_____"
],
[
"def display(image, mask, prediction=None):\n plt.figure(figsize=(18, 5))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n \n plt.subplot(1, 3, 1)\n plt.title('Input Image') \n plt.imshow(image[:,:,0])\n plt.colorbar()\n plt.axis('off')\n \n plt.subplot(1, 3, 2)\n plt.title('True Mask') \n plt.imshow(mask)\n plt.colorbar()\n plt.axis('off')\n\n if prediction is None:\n try:\n prediction = model.predict(image[None, ...])[0, ...]\n except NameError:\n return\n\n plt.subplot(1, 3, 3)\n plt.title('Predicted Mask') \n plt.imshow(prediction)\n plt.colorbar()\n plt.axis('off')\n\n \n \nclass DisplayCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n display(sample_image, sample_mask)\n plt.show()\n print ('\\nSample Prediction after epoch {}\\n'.format(epoch+1))\n \n \ndisplay(sample_image, sample_mask)",
"_____no_output_____"
],
[
"def _activation(x):\n x = tf.keras.layers.Activation(\"relu\")(x)\n\n return x\n\n\ndef _convolution(x, number_of_filters, kernel_size=3):\n x = tf.keras.layers.Conv2D(\n number_of_filters, kernel_size, padding=\"same\", kernel_initializer=\"he_normal\"\n )(x)\n\n return x\n\n\ndef _conv_transpose(x, number_of_filters, kernel_size=3):\n x = tf.keras.layers.Conv2DTranspose(\n number_of_filters,\n kernel_size,\n strides=2,\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n )(x)\n\n return x",
"_____no_output_____"
],
[
"def encode(\n x,\n number_of_filters,\n number_of_convolutions=2,\n):\n for _ in range(number_of_convolutions):\n x = _convolution(x, number_of_filters)\n x = _activation(x)\n skip = x\n\n x = tf.keras.layers.MaxPool2D()(x)\n x = _activation(x)\n\n return x, skip",
"_____no_output_____"
],
[
"def decode(\n x,\n skip,\n number_of_filters,\n number_of_convolutions=2,\n):\n x = _conv_transpose(x, number_of_filters)\n x = _activation(x)\n\n x = tf.keras.layers.concatenate([skip, x], axis=3)\n\n for _ in range(number_of_convolutions):\n x = _convolution(x, number_of_filters)\n x = _activation(x)\n\n return x",
"_____no_output_____"
],
[
"mask_dims = training_masks.shape\nassert mask_dims[1] == mask_dims[2]\ngrid_size = int(mask_dims[2])\noutput_channels = int(mask_dims[-1])",
"_____no_output_____"
],
[
"inputs = tf.keras.layers.Input((grid_size, grid_size, 1))\nx = inputs\nskips = []\n\nfor number_of_filters in [32, 64, 128]:\n x, skip = encode(x, number_of_filters)\n skips.append(skip)\n \nskips.reverse()\n\nfor number_of_filters, skip in zip([256, 128, 64], skips):\n x = decode(x, skip, number_of_filters)\n \nx = tf.keras.layers.Conv2D(\n output_channels,\n 1,\n activation=\"sigmoid\",\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n)(x)\n\nmodel = tf.keras.Model(inputs=inputs, outputs=x)",
"_____no_output_____"
],
[
"model.summary()",
"_____no_output_____"
],
[
"model.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=[\n tf.keras.metrics.BinaryAccuracy(),\n tf.keras.metrics.Recall(),\n tf.keras.metrics.Precision()\n ]\n)\n\ndisplay(sample_image, sample_mask)",
"_____no_output_____"
],
[
"history = model.fit(\n training_images, \n training_masks,\n epochs=20,\n validation_data=(validation_images, validation_masks),\n callbacks=[DisplayCallback()]\n)",
"_____no_output_____"
],
[
"predictions = model.predict(validation_images)\n\n\nfor image, mask, prediction in zip(validation_images, validation_masks, predictions):\n display(image, mask, prediction)",
"_____no_output_____"
],
[
"example_patient_mask = validation_masks[0,:,:,2]\nexample_patient_prediction = predictions[0,:,:,2]",
"_____no_output_____"
],
[
"plt.imshow(validation_images[0,:,:,0])",
"_____no_output_____"
],
[
"plt.imshow(example_patient_mask)",
"_____no_output_____"
],
[
"plt.imshow(example_patient_prediction)",
"_____no_output_____"
],
[
"edge_filtered_mask = skimage.filters.scharr(example_patient_mask)\nplt.imshow(edge_filtered_mask)",
"_____no_output_____"
],
[
"edge_filtered_prediction = skimage.filters.scharr(example_patient_prediction)\nplt.imshow(edge_filtered_prediction)",
"_____no_output_____"
],
[
"score = 1 - np.sum(np.abs(edge_filtered_mask - edge_filtered_prediction)) / np.sum(\n edge_filtered_mask + edge_filtered_prediction\n)\nscore # 1, perfect agreement | 0, no overlap",
"_____no_output_____"
],
[
"def soft_surface_dice(reference, evaluation):\n edge_reference = skimage.filters.scharr(reference)\n edge_evaluation = skimage.filters.scharr(evaluation)\n\n if np.sum(edge_reference) == 0:\n return np.nan\n\n score = np.sum(np.abs(edge_evaluation - edge_reference)) / np.sum(\n edge_evaluation + edge_reference\n )\n\n return 1 - score\n\n\nlabels = ['eyes', 'brain', 'patient']\ndef get_scores(validation_masks, predictions):\n scores = collections.defaultdict(lambda: [])\n for mask, prediction in zip(validation_masks, predictions):\n for i, label in enumerate(labels):\n scores[label].append(soft_surface_dice(mask[..., i], prediction[..., i]))\n\n return scores\n\nscores = get_scores(validation_masks, predictions)",
"_____no_output_____"
],
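A quick sanity check of `soft_surface_dice` on toy masks makes the metric concrete; the square masks below are illustrative and not part of the dataset:

```python
import numpy as np

reference = np.zeros((32, 32))
reference[8:24, 8:24] = 1.0      # a 16x16 square mask

identical = reference.copy()      # identical contour -> score of 1.0
shifted = np.zeros((32, 32))
shifted[10:26, 10:26] = 1.0       # slightly offset square -> score between 0 and 1

print(soft_surface_dice(reference, identical))
print(soft_surface_dice(reference, shifted))
```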
[
"np.nanmean(scores['eyes'])",
"_____no_output_____"
],
[
"np.nanmean(scores['brain'])",
"_____no_output_____"
],
[
"np.nanmean(scores['patient'])",
"_____no_output_____"
],
[
"def display_scores():\n predictions = model.predict(validation_images)\n scores = get_scores(validation_masks, predictions)\n\n print(f\"Eyes: {round(np.nanmean(scores['eyes']), 4)}\")\n print(f\"Brain: {round(np.nanmean(scores['brain']), 4)}\")\n print(f\"Patient: {round(np.nanmean(scores['patient']), 4)}\")",
"_____no_output_____"
],
[
"class DisplayCallbackWithScores(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n display(sample_image, sample_mask)\n plt.show()\n display_scores()\n print ('\\nSample Prediction after epoch {}\\n'.format(epoch+1))",
"_____no_output_____"
],
[
"history = model.fit(\n training_images, \n training_masks,\n epochs=100,\n validation_data=(validation_images, validation_masks),\n callbacks=[DisplayCallbackWithScores()]\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e75215f4078666f002b987b6b8c6efd811bd7ce2 | 7,387 | ipynb | Jupyter Notebook | Statistical Imputation With SimpleImputer.ipynb | PacktPublishing/Data-Cleansing-Master-Class-in-Python | 47e04c258ec31e8011e62d081beb45434fd3948f | [
"MIT"
] | 3 | 2021-11-08T22:25:35.000Z | 2022-01-05T16:33:53.000Z | Statistical Imputation With SimpleImputer.ipynb | PacktPublishing/Data-Cleansing-Master-Class-in-Python | 47e04c258ec31e8011e62d081beb45434fd3948f | [
"MIT"
] | null | null | null | Statistical Imputation With SimpleImputer.ipynb | PacktPublishing/Data-Cleansing-Master-Class-in-Python | 47e04c258ec31e8011e62d081beb45434fd3948f | [
"MIT"
] | 4 | 2021-12-21T17:42:41.000Z | 2022-01-16T23:17:12.000Z | 33.730594 | 92 | 0.447949 | [
[
[
"# summarize the horse colic dataset\nfrom pandas import read_csv\n# load dataset\ndataframe = read_csv('horse-colic.csv', header=None,)\n# summarize the first few rows\nprint(dataframe.head())",
" 0 1 2 3 4 5 6 7 8 9 ... 18 19 20 21 22 23 \\\n0 2 1 530101 38.50 66 28 3 3 ? 2 ... 45.00 8.40 ? ? 2 2 \n1 1 1 534817 39.2 88 20 ? ? 4 1 ... 50 85 2 2 3 2 \n2 2 1 530334 38.30 40 24 1 1 3 1 ... 33.00 6.70 ? ? 1 2 \n3 1 9 5290409 39.10 164 84 4 1 6 2 ... 48.00 7.20 3 5.30 2 1 \n4 2 1 530255 37.30 104 35 ? ? 6 2 ... 74.00 7.40 ? ? 2 2 \n\n 24 25 26 27 \n0 11300 0 0 2 \n1 2208 0 0 2 \n2 0 0 0 1 \n3 2208 0 0 1 \n4 4300 0 0 2 \n\n[5 rows x 28 columns]\n"
],
[
"# summarize the horse colic dataset\nfrom pandas import read_csv\n# load dataset\ndataframe = read_csv('horse-colic.csv', header=None, na_values='?')\n# summarize the first few rows\nprint(dataframe.head())\n# summarize the number of rows with missing values for each column\nfor i in range(dataframe.shape[1]):\n# count number of rows with missing values\n n_miss = dataframe[[i]].isnull().sum()\n perc = n_miss / dataframe.shape[0] * 100\nprint('> %d, Missing: %d (%.1f%%)' % (i, n_miss, perc))",
" 0 1 2 3 4 5 6 7 8 9 ... 18 19 \\\n0 2.0 1 530101 38.5 66.0 28.0 3.0 3.0 NaN 2.0 ... 45.0 8.4 \n1 1.0 1 534817 39.2 88.0 20.0 NaN NaN 4.0 1.0 ... 50.0 85.0 \n2 2.0 1 530334 38.3 40.0 24.0 1.0 1.0 3.0 1.0 ... 33.0 6.7 \n3 1.0 9 5290409 39.1 164.0 84.0 4.0 1.0 6.0 2.0 ... 48.0 7.2 \n4 2.0 1 530255 37.3 104.0 35.0 NaN NaN 6.0 2.0 ... 74.0 7.4 \n\n 20 21 22 23 24 25 26 27 \n0 NaN NaN 2.0 2 11300 0 0 2 \n1 2.0 2.0 3.0 2 2208 0 0 2 \n2 NaN NaN 1.0 2 0 0 0 1 \n3 3.0 5.3 2.0 1 2208 0 0 1 \n4 NaN NaN 2.0 2 4300 0 0 2 \n\n[5 rows x 28 columns]\n> 27, Missing: 0 (0.0%)\n"
],
[
"# summarize the horse colic dataset\nfrom pandas import read_csv\n# load dataset\ndataframe = read_csv('horse-colic.csv', header=None, na_values='?')\n# summarize the first few rows\nprint(dataframe.head())\n# summarize the number of rows with missing values for each column\nfor i in range(dataframe.shape[1]):\n# count number of rows with missing values\n n_miss = dataframe[[i]].isnull().sum()\n perc = n_miss / dataframe.shape[0] * 100\n print('> %d, Missing: %d (%.1f%%)' % (i, n_miss, perc))",
" 0 1 2 3 4 5 6 7 8 9 ... 18 19 \\\n0 2.0 1 530101 38.5 66.0 28.0 3.0 3.0 NaN 2.0 ... 45.0 8.4 \n1 1.0 1 534817 39.2 88.0 20.0 NaN NaN 4.0 1.0 ... 50.0 85.0 \n2 2.0 1 530334 38.3 40.0 24.0 1.0 1.0 3.0 1.0 ... 33.0 6.7 \n3 1.0 9 5290409 39.1 164.0 84.0 4.0 1.0 6.0 2.0 ... 48.0 7.2 \n4 2.0 1 530255 37.3 104.0 35.0 NaN NaN 6.0 2.0 ... 74.0 7.4 \n\n 20 21 22 23 24 25 26 27 \n0 NaN NaN 2.0 2 11300 0 0 2 \n1 2.0 2.0 3.0 2 2208 0 0 2 \n2 NaN NaN 1.0 2 0 0 0 1 \n3 3.0 5.3 2.0 1 2208 0 0 1 \n4 NaN NaN 2.0 2 4300 0 0 2 \n\n[5 rows x 28 columns]\n> 0, Missing: 1 (0.3%)\n> 1, Missing: 0 (0.0%)\n> 2, Missing: 0 (0.0%)\n> 3, Missing: 60 (20.0%)\n> 4, Missing: 24 (8.0%)\n> 5, Missing: 58 (19.3%)\n> 6, Missing: 56 (18.7%)\n> 7, Missing: 69 (23.0%)\n> 8, Missing: 47 (15.7%)\n> 9, Missing: 32 (10.7%)\n> 10, Missing: 55 (18.3%)\n> 11, Missing: 44 (14.7%)\n> 12, Missing: 56 (18.7%)\n> 13, Missing: 104 (34.7%)\n> 14, Missing: 106 (35.3%)\n> 15, Missing: 247 (82.3%)\n> 16, Missing: 102 (34.0%)\n> 17, Missing: 118 (39.3%)\n> 18, Missing: 29 (9.7%)\n> 19, Missing: 33 (11.0%)\n> 20, Missing: 165 (55.0%)\n> 21, Missing: 198 (66.0%)\n> 22, Missing: 1 (0.3%)\n> 23, Missing: 0 (0.0%)\n> 24, Missing: 0 (0.0%)\n> 25, Missing: 0 (0.0%)\n> 26, Missing: 0 (0.0%)\n> 27, Missing: 0 (0.0%)\n"
],
[
"# statistical imputation transform for the horse colic dataset\nfrom numpy import isnan\nfrom pandas import read_csv\nfrom sklearn.impute import SimpleImputer\n# load dataset\ndataframe = read_csv('horse-colic.csv', header=None, na_values='?')\n# split into input and output elements\ndata = dataframe.values\nix = [i for i in range(data.shape[1]) if i != 23]\nX, y = data[:, ix], data[:, 23]\n# summarize total missing\nprint('Missing: %d' % sum(isnan(X).flatten()))\n# define imputer\nimputer = SimpleImputer(strategy='mean')\n# fit on the dataset\nimputer.fit(X)\n# transform the dataset\nXtrans = imputer.transform(X)\n# summarize total missing\nprint('Missing: %d' % sum(isnan(Xtrans).flatten()))",
"Missing: 1605\nMissing: 0\n"
]
]
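To use the imputer as part of a model without leaking statistics from held-out data, it is common to wrap it in a scikit-learn Pipeline so the column means are recomputed on each training fold. The model choice and 3-fold cross-validation below are illustrative, not part of the original notebook:

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline

# X and y as prepared in the cell above (column 23 held out as the target)
pipeline = Pipeline(steps=[
    ('impute', SimpleImputer(strategy='mean')),
    ('model', RandomForestClassifier(n_estimators=100, random_state=1)),
])

scores = cross_val_score(pipeline, X, y, scoring='accuracy', cv=3)
print('Mean accuracy: %.3f' % scores.mean())
```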
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7521b64a972a5327e9a9a8029c39a86a1d319b6 | 636,613 | ipynb | Jupyter Notebook | data-512-a2/notebook.ipynb | jameslee0920/data-512 | 11b036f1f3bc385c4a701bec167399fcf0eb7b4d | [
"MIT"
] | null | null | null | data-512-a2/notebook.ipynb | jameslee0920/data-512 | 11b036f1f3bc385c4a701bec167399fcf0eb7b4d | [
"MIT"
] | null | null | null | data-512-a2/notebook.ipynb | jameslee0920/data-512 | 11b036f1f3bc385c4a701bec167399fcf0eb7b4d | [
"MIT"
] | null | null | null | 314.687593 | 58,208 | 0.916273 | [
[
[
"### Load Packages\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nrcParams['figure.figsize'] = [20, 10]",
"_____no_output_____"
]
],
[
[
"# Gathering Data\n",
"_____no_output_____"
],
[
"### Load Datasets, Toxicity & Aggression\n",
"_____no_output_____"
]
],
[
[
"filepath = \"../../../Documents/\"\naggression = filepath+\"Aggression/\"\ntoxicity = filepath+\"Toxicity/\"\ntoxic_annotated_comments = pd.read_csv(toxicity+\"toxicity_annotated_comments.tsv\", sep='\\t')\ntoxic_annotations = pd.read_csv(toxicity+\"toxicity_annotations.tsv\", sep='\\t')\ntoxic_demographics = pd.read_csv(toxicity+\"toxicity_worker_demographics.tsv\", sep= '\\t')\nagg_annotated_comments = pd.read_csv(aggression+\"aggression_annotated_comments.tsv\", sep='\\t')\nagg_annotations = pd.read_csv(aggression+\"aggression_annotations.tsv\", sep='\\t')\nagg_demographics = pd.read_csv(aggression+\"aggression_worker_demographics.tsv\", sep= '\\t')\n",
"_____no_output_____"
],
[
"toxic_annotations",
"_____no_output_____"
]
],
[
[
"### What is inside the [Datasets](https://meta.wikimedia.org/wiki/Research:Detox/Data_Release)? ",
"_____no_output_____"
]
],
[
[
"toxic_df = toxic_annotations.merge(toxic_annotated_comments, on= 'rev_id', how = 'left').merge(toxic_demographics, on = 'worker_id', how = 'left')\ntoxic_df.head()",
"_____no_output_____"
],
[
"#Some worker ids not included in toxic demographics\nlen(set(toxic_annotations.worker_id) - set(toxic_demographics.worker_id))",
"_____no_output_____"
],
[
"aggressions_df = agg_annotations.merge(agg_annotated_comments, on= 'rev_id', how = 'left').merge(agg_demographics, on = 'worker_id', how = 'left')\naggressions_df.head()",
"_____no_output_____"
],
[
"#Some worker ids not included in toxic demographics\nlen(set(agg_annotations.worker_id) - set(agg_demographics.worker_id))",
"_____no_output_____"
]
],
[
[
"# Research Questions",
"_____no_output_____"
],
[
"## How has toxicity and aggression changed over time?",
"_____no_output_____"
],
[
"### Toxicity",
"_____no_output_____"
],
[
"Toxicity is measured two ways. We can see the ratio of toxic comments vs non toxic comments over time, and the average toxicity score over time. Toxicity is a binary value column that we can do a count over number of rows to find the ratio toxic comments. Toxicity Score is a quantitative value ranging from -2 to 2 with negative values indicating a more toxic comment. For visualization purposes, to keep higher y axis values indicating greater toxicity, toxicity scores will be reversed.",
"_____no_output_____"
]
],
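As a tiny, made-up illustration of the two measures just described (the numbers below are not from the dataset), the ratio comes from averaging the binary column and the reversed score from negating the mean score:

```python
import pandas as pd

toy = pd.DataFrame({'toxicity': [1, 0, 0, 1],           # binary toxic / not-toxic label
                    'toxicity_score': [-2, 1, 0, -1]})  # -2 (most toxic) .. 2 (healthy)

toxicity_ratio = toy['toxicity'].sum() / len(toy)             # 0.5 -> half the annotations are toxic
toxicity_score_reversed = -1 * toy['toxicity_score'].mean()   # 0.5 -> higher means more toxic
print(toxicity_ratio, toxicity_score_reversed)
```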
[
[
"toxic_df.head()",
"_____no_output_____"
],
[
"plt.hist(toxic_df.toxicity_score)\nplt.title(\"Toxicity Score Distribution\")",
"_____no_output_____"
]
],
[
[
"Toxicity Scores appear to have more positive or healthy comments than toxic comments",
"_____no_output_____"
],
[
"### Lets first look at a general time trend of the entire dataset on a per year basis",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"col_rename = {'rev_id':'count'}\ntoxic_df_trend = toxic_df.groupby('year').agg({'rev_id':'count', 'toxicity':'sum', 'toxicity_score':'mean'}).reset_index().rename(columns = col_rename)\ntoxic_df_trend['toxicity_ratio'] = toxic_df_trend['toxicity'] / toxic_df_trend['count']\ntoxic_df_trend['toxicity_score_reversed'] = -1 * toxic_df_trend['toxicity_score']\ntoxic_df_trend.head()\n\n",
"_____no_output_____"
]
],
[
[
"#### Toxicity Ratio",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_ratio)\nplt.grid(True)\nplt.title(\"Toxicity Ratio Over Time\")",
"_____no_output_____"
]
],
[
[
"#### Toxicity Score",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_score_reversed)\nplt.grid(True)\nplt.title(\"Toxicity Score Over Time\")",
"_____no_output_____"
]
],
[
[
"Interestingly, there appears to be a rapid increase in both the average toxicity score and the ratio of toxic comments from the beginning of the dataset to around 2008. However, the rates appear to flat out remaining consistent in later years.\nMy next question is, how do these results change for a user that was logged in vs not logged in? ",
"_____no_output_____"
],
[
"### Lets look at the difference of loging in vs not logging in",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"col_rename = {'rev_id':'count'}\ntoxic_df_trend_logged_in = toxic_df[toxic_df.logged_in == True].groupby('year').agg({'rev_id':'count', 'toxicity':'sum', 'toxicity_score':'mean'}).reset_index().rename(columns = col_rename)\ntoxic_df_trend_logged_in['toxicity_ratio'] = toxic_df_trend_logged_in['toxicity'] / toxic_df_trend_logged_in['count']\ntoxic_df_trend_logged_in['toxicity_score_reversed'] = -1 * toxic_df_trend_logged_in['toxicity_score']\ntoxic_df_trend_not_logged_in = toxic_df[toxic_df.logged_in == False].groupby('year').agg({'rev_id':'count', 'toxicity':'sum', 'toxicity_score':'mean'}).reset_index().rename(columns = col_rename)\ntoxic_df_trend_not_logged_in['toxicity_ratio'] = toxic_df_trend_not_logged_in['toxicity'] / toxic_df_trend_not_logged_in['count']\ntoxic_df_trend_not_logged_in['toxicity_score_reversed'] = -1 * toxic_df_trend_not_logged_in['toxicity_score']\n\n",
"_____no_output_____"
]
],
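The same groupby/aggregation block is repeated for every subset (total, logged in, not logged in, and later the two sampling methods). A small helper such as the hypothetical `yearly_toxicity` below, which only reuses operations already present in the cells above, could express the pattern once:

```python
def yearly_toxicity(df):
    """Per-year count, toxic-comment count, mean score, ratio and reversed score."""
    trend = (df.groupby('year')
               .agg({'rev_id': 'count', 'toxicity': 'sum', 'toxicity_score': 'mean'})
               .reset_index()
               .rename(columns={'rev_id': 'count'}))
    trend['toxicity_ratio'] = trend['toxicity'] / trend['count']
    trend['toxicity_score_reversed'] = -1 * trend['toxicity_score']
    return trend

# for example, the logged-in subset computed in the cell above
toxic_df_trend_logged_in = yearly_toxicity(toxic_df[toxic_df.logged_in == True])
```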
[
[
"#### Toxicity Ratio Log In",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_ratio, color = 'black', label ='Total')\nplt.plot(toxic_df_trend_logged_in.year, toxic_df_trend_logged_in.toxicity_ratio, color = 'blue', label ='Logged In')\nplt.plot(toxic_df_trend_not_logged_in.year, toxic_df_trend_not_logged_in.toxicity_ratio, color = 'red', label ='Not Logged In')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Toxicity Ratio Overtime\")",
"_____no_output_____"
]
],
[
[
"#### Toxicity Score Log In",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_score_reversed, color = 'black', label ='Total')\nplt.plot(toxic_df_trend_logged_in.year, toxic_df_trend_logged_in.toxicity_score_reversed, color = 'blue', label ='Logged In')\nplt.plot(toxic_df_trend_not_logged_in.year, toxic_df_trend_not_logged_in.toxicity_score_reversed, color = 'red', label ='Not Logged In')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Toxicity Score Over Time\")",
"_____no_output_____"
]
],
[
[
"There appears to be a big difference in both the scoring and ratio when the user is logged in or not. On the years after 2005 for the not logged in group, the average (reversed) toxicity score turns positive, while remaining a ratio of less than 0.5, indicating that the comments are very high in toxicity for users not logged in. The overall shape of the curves remain consistent with the total, regardless of whether the user was logged in or not.\n\nThe next question is how do the results look for the different samplings? There are two different samples in the dataset. One sample was collected randomly, and the other sample was collected from blocked samples that were blocked for violating policy whether it be for harassment or for personal attacks. ",
"_____no_output_____"
],
[
"### Lets look at the difference of sampling methods",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"\ncol_rename = {'rev_id':'count'}\ntoxic_df_trend_blocked = toxic_df[toxic_df['sample'] == 'blocked'].groupby('year').agg({'rev_id':'count', 'toxicity':'sum', 'toxicity_score':'mean'}).reset_index().rename(columns = col_rename)\ntoxic_df_trend_blocked['toxicity_ratio'] = toxic_df_trend_blocked['toxicity'] / toxic_df_trend_blocked['count']\ntoxic_df_trend_blocked['toxicity_score_reversed'] = -1 * toxic_df_trend_blocked['toxicity_score']\ntoxic_df_trend_random = toxic_df[toxic_df['sample'] == 'random'].groupby('year').agg({'rev_id':'count', 'toxicity':'sum', 'toxicity_score':'mean'}).reset_index().rename(columns = col_rename)\ntoxic_df_trend_random['toxicity_ratio'] = toxic_df_trend_random['toxicity'] / toxic_df_trend_random['count']\ntoxic_df_trend_random['toxicity_score_reversed'] = -1 * toxic_df_trend_random['toxicity_score']\n\n",
"_____no_output_____"
]
],
[
[
"#### Toxicity Ratio Sampling",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_ratio, color = 'black', label ='Total')\nplt.plot(toxic_df_trend_random.year, toxic_df_trend_random.toxicity_ratio, color = 'blue', label ='Random')\nplt.plot(toxic_df_trend_blocked.year, toxic_df_trend_blocked.toxicity_ratio, color = 'red', label ='Blocked')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Toxicity Ratio Overtime\")",
"_____no_output_____"
]
],
[
[
"#### Toxicity Score Sampling",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(toxic_df_trend.year, toxic_df_trend.toxicity_score_reversed, color = 'black', label ='Total')\nplt.plot(toxic_df_trend_random.year, toxic_df_trend_random.toxicity_score_reversed, color = 'blue', label ='Random')\nplt.plot(toxic_df_trend_blocked.year, toxic_df_trend_blocked.toxicity_score_reversed, color = 'red', label ='Blocked')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Toxicity Score Over Time\")",
"_____no_output_____"
]
],
[
[
"There appears to be a big difference in both the scoring and ratio when the sampling is done from blocked sources or when it is collected randomly. When sampled from blocked sources, the toxicity score turns positive right on its peak at 2008, but drops to negative values after 2009. When the sample is picked randomly, the overall trend is different in that it remains consistent throughout the years. Previously, when sampled from all sources, the toxicity has been shooting upwards until 2008 and have remained consistent in leater years. ",
"_____no_output_____"
],
[
"### Aggression",
"_____no_output_____"
],
[
"Aggression, like toxicity is measured two ways. We can see the ratio of aggressive comments vs non aggressive comments over time, and the average aggression score over time. Aggression is a binary value column that we can do a count over number of rows to find the ratio aggressive comments. Aggression Score is a quantitative value ranging from -2 to 2 with negative values indicating a more aggressive comment. As done previously on toxicity, for visualization purposes, to keep higher y axis values indicating greater aggresion, aggression scores will be reversed.",
"_____no_output_____"
]
],
[
[
"aggressions_df.head()",
"_____no_output_____"
]
],
[
[
"### Lets first look at a general time trend of the entire dataset on a per year basis",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"col_rename = {'rev_id':'count'}\naggressions_df_trend = aggressions_df.groupby('year').agg({'rev_id':'count', 'aggression':'sum', 'aggression_score':'mean'}).reset_index().rename(columns = col_rename)\naggressions_df_trend['aggression_ratio'] = aggressions_df_trend['aggression'] / aggressions_df_trend['count']\naggressions_df_trend['aggression_score_reversed'] = -1 * aggressions_df_trend['aggression_score']\naggressions_df_trend.head()\n\n",
"_____no_output_____"
],
[
"plt.hist(aggressions_df.aggression_score)\nplt.title(\"Aggression Score Distribution\")",
"_____no_output_____"
]
],
[
[
"While neutral comments are highly dominant in here, there appears to be slightly more aggressive comments than healthy comments.",
"_____no_output_____"
],
[
"#### Aggression Ratio",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(aggressions_df_trend.year, aggressions_df_trend.aggression_ratio)\nplt.grid(True)\nplt.title(\"Aggression Ratio Over Time\")",
"_____no_output_____"
]
],
[
[
"#### Agression Score",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(aggressions_df_trend.year, aggressions_df_trend.aggression_score_reversed)\nplt.grid(True)\nplt.title(\"Aggression Score Over Time\")",
"_____no_output_____"
]
],
[
[
"The overall trend here is similar to what we saw with toxicity. There appears to be a sharp increase until year 2008 at its peak, plateauing consistently afterwards. The scoress however, appear to reach positive values before its peak at 2005 and remaining positive. While most comments appear to be neutral, there are more aggressive comments than healthy comments. ",
"_____no_output_____"
],
[
"### Lets look at the difference of log in vs not logged in",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"col_rename = {'rev_id':'count'}\naggressions_df_trend_logged_in = aggressions_df[aggressions_df.logged_in == True].groupby('year').agg({'rev_id':'count', 'aggression':'sum', 'aggression_score':'mean'}).reset_index().rename(columns = col_rename)\naggressions_df_trend_logged_in['aggression_ratio'] = aggressions_df_trend_logged_in['aggression'] / aggressions_df_trend_logged_in['count']\naggressions_df_trend_logged_in['aggression_score_reversed'] = -1 * aggressions_df_trend_logged_in['aggression_score']\naggressions_df_trend_not_logged_in = aggressions_df[aggressions_df.logged_in == False].groupby('year').agg({'rev_id':'count', 'aggression':'sum', 'aggression_score':'mean'}).reset_index().rename(columns = col_rename)\naggressions_df_trend_not_logged_in['aggression_ratio'] = aggressions_df_trend_not_logged_in['aggression'] / aggressions_df_trend_not_logged_in['count']\naggressions_df_trend_not_logged_in['aggression_score_reversed'] = -1 * aggressions_df_trend_not_logged_in['aggression_score']\n\n",
"_____no_output_____"
]
],
[
[
"#### Aggression Ratio Log In",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(aggressions_df_trend.year, aggressions_df_trend.aggression_ratio, color = 'black', label ='Total')\nplt.plot(aggressions_df_trend_logged_in.year, aggressions_df_trend_logged_in.aggression_ratio, color = 'blue', label ='Logged In')\nplt.plot(aggressions_df_trend_not_logged_in.year, aggressions_df_trend_not_logged_in.aggression_ratio, color = 'red', label ='Not Logged In')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Aggression Ratio Over Time\")",
"_____no_output_____"
],
[
"aggressions_df[aggressions_df.logged_in == False].year.unique()",
"_____no_output_____"
]
],
[
[
"Aggressions dataset appears to not have any data on comments made without logging in after year 2002.",
"_____no_output_____"
],
[
"### Lets look at the difference of blocked vs randomly sampled sources",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"col_rename = {'rev_id':'count'}\naggressions_df_trend_blocked = aggressions_df[aggressions_df['sample'] == 'blocked'].groupby('year').agg({'rev_id':'count', 'aggression':'sum', 'aggression_score':'mean'}).reset_index().rename(columns = col_rename)\naggressions_df_trend_blocked['aggression_ratio'] = aggressions_df_trend_blocked['aggression'] / aggressions_df_trend_blocked['count']\naggressions_df_trend_blocked['aggression_score_reversed'] = -1 * aggressions_df_trend_blocked['aggression_score']\naggressions_df_trend_random = aggressions_df[aggressions_df['sample'] == 'random'].groupby('year').agg({'rev_id':'count', 'aggression':'sum', 'aggression_score':'mean'}).reset_index().rename(columns = col_rename)\naggressions_df_trend_random['aggression_ratio'] = aggressions_df_trend_random['aggression'] / aggressions_df_trend_random['count']\naggressions_df_trend_random['aggression_score_reversed'] = -1 * aggressions_df_trend_random['aggression_score']\n",
"_____no_output_____"
]
],
[
[
"#### Aggression Ratio Sampling",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(aggressions_df_trend.year, aggressions_df_trend.aggression_ratio, color = 'black', label ='Total')\nplt.plot(aggressions_df_trend_random.year, aggressions_df_trend_random.aggression_ratio, color = 'blue', label ='Random')\nplt.plot(aggressions_df_trend_blocked.year, aggressions_df_trend_blocked.aggression_ratio, color = 'red', label ='Blocked')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Aggression Ratio Over Time\")",
"_____no_output_____"
]
],
[
[
"#### Aggression Score Sampling",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.ticklabel_format(style='plain')\nplt.plot(aggressions_df_trend.year, aggressions_df_trend.aggression_score_reversed, color = 'black', label ='Total')\nplt.plot(aggressions_df_trend_random.year, aggressions_df_trend_random.aggression_score_reversed, color = 'blue', label ='Random')\nplt.plot(aggressions_df_trend_blocked.year, aggressions_df_trend_blocked.aggression_score_reversed, color = 'red', label ='Blocked')\nplt.legend(loc = \"upper left\")\nplt.grid(True)\nplt.title(\"Aggression Score Over Time\")",
"_____no_output_____"
]
],
[
[
"The results are similar to what was found for toxicity. The Blocked source has a much a higher score than the total and randomly sampled source. Additionally, the Randomly sampled source has a consistent score throughout the entire date range. Toxicity and Aggression appear to move similarly as aggressive comments indicate higher toxicity.",
"_____no_output_____"
],
[
"# Further Implications:",
"_____no_output_____"
],
[
"Which, if any, of these demo applications would you expect the Perspective API—or any model trained on the Wikipedia Talk corpus—to perform well in? Why?\n\nThe Perspective API would perform well as long as it is tracking the same subject. For example, the model would be excellent in detecting Toxicity or Severe Toxicity, using the data trained on Wikipedia's Toxicity dataset. Since there is a specifically designation of whether a comment is toxic or not, along with the assigned score for the severity of toxicity, the model would perform well. \n",
"_____no_output_____"
],
[
"Which, if any, of these demo applications would you expect the Perspective API to perform poorly in? Why?\n\nHowever, the Perspective API would not perform so well on a different topic. For example, it would perform poorly in rating Sexually Explicity comments as the Wikipedia Talk corpus doesn't have a scoring for any sexually explicit comments. The Perspective API would perform poorly on scoring a subject that it does not have the fitting training data on. ",
"_____no_output_____"
],
[
"What are some potential unintended, negative consequences of using the Perspective API for any of these purposes? In your opinion, are these consequences likely or serious enough that you would recommend that the Perspective API not be used in these applications? Why or why not?\n\nThere may be unintentional cultural discrimination through the use of Perspective API. One comment may be unintentionally offensive or viewed improper based on a culture. Additionally, if the scorers are not demographically diverse in culture, age, race, sex, and income, there can be the consequence of discrimination to a certain group. Whether the Perspective API be used or not would depend on the seriousness of the bias that results in a machine learned model, but the use of the API would help filter out offensive comments much quicker than a human reader would be able to. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7524d1d057187dcd0e41b9773b4b4a30bd9cfad | 8,735 | ipynb | Jupyter Notebook | get_started_workspace.ipynb | tamersalama/course-v3 | d0bbb67f4cb40d9403463f0743eb2b620a6dad12 | [
"Apache-2.0"
] | 16 | 2019-02-27T22:37:25.000Z | 2021-11-08T12:39:33.000Z | get_started_workspace.ipynb | tamersalama/course-v3 | d0bbb67f4cb40d9403463f0743eb2b620a6dad12 | [
"Apache-2.0"
] | 12 | 2020-01-28T22:12:47.000Z | 2021-10-13T02:15:37.000Z | get_started_workspace.ipynb | tamersalama/course-v3 | d0bbb67f4cb40d9403463f0743eb2b620a6dad12 | [
"Apache-2.0"
] | 7 | 2019-05-28T06:26:26.000Z | 2021-11-27T16:33:55.000Z | 43.242574 | 384 | 0.654493 | [
[
[
"# Workspace\n\nWorkspace is an interactive environment ([Jupyter Lab](https://blog.jupyter.org/jupyterlab-is-ready-for-users-5a6f039b8906)) for developing and running code. You can run Jupyter notebooks, Python scripts and much more. All the files and data in your workspace will be preserved for you, across restarts. You can think of it as your persistent, on-demand machine on the cloud.\n\n",
"_____no_output_____"
],
[
"<br><br>\n\n## Basics\n\n### Stopping a workspace\n\nYou can see the current status of your workspace in the top bar. If you want to stop this workspace, click the *Shutdown* button. All your files and directories in the current directory (`/floyd/home`) will be saved and persisted.\n\n\n\n*Note: Save all your notebooks and files before shutting down the workspace. All running notebooks and scripts will be stopped during Shutdown.*\n\n### Billing \n\n**Important**: You will be charged for the entire duration that your workspace is *running*. Please make sure to shutdown the workspace if you are no longer actively using it. You can [purchase powerups](https://www.floydhub.com/settings/powerups) if you are low on runtime to start a workspace.\n\n### Resuming a workspace\n\nYou can resume working on a stopped workspace by clicking the *Resume* button in the project page. This will restore the workspace to its previous state.\n\n",
"_____no_output_____"
],
[
"<br><br>\n\n## Code\n\nYour workspace is located at `/floyd/home` on the filesystem. \n\n**IMPORTANT - Store your files in /floyd/home**: FloydHub only persists files that are stored under `/floyd/home`. Make sure you store all the files you want persisted here. Files stored in any other location will **not** be saved when your workspace is shutdown.\n\n### Uploading code from local computer\n\nYou can upload your code and other files from your local machine using the upload button in the File Viewer panel (on the left).\n\n\n\n*Make sure you upload your code into the `/floyd/home` directory.*\n\n### Downloading code from Github\n\nIf your code is on Github (or any online repository), you can also clone your repository using the `git clone` command.\n\nUsing the Terminal:\n\n\n\nOr, via your Jupyter Notebook cell:\n\n\n\nYou can now start working on your project. Open and run any Notebook or Python script. You can also edit code files and run them using the terminal.",
"_____no_output_____"
],
[
"<br/><br/>\n## Using Terminal\n\nYou can open a Bash terminal (console) inside your workspace to run code, debug or inspect your files. To open a new terminal, click the *+* button in the File Viewer panel (on the left) and click *terminal* icon.\n\n",
"_____no_output_____"
],
[
"<br/>\n\n## Data\n\n### Downloading data from internet\n\nIf your data is available on the internet, you can also download it directly into your workspace using the Terminal.\n\n```\n# Ensure you're in your workspace directory\ncd /floyd/home\n\nwget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n```\n\n### Attaching FloydHub datasets\n\nFloydHub provides an easy way to manage large datasets that you can use across projects. Instead of downloading your data everytime you start a workspace, you can [upload your data as a FloydHub dataset](https://docs.floydhub.com/guides/create_and_upload_dataset/). \n\nYou can attach FloydHub datasets to your workspace using the right panel. You need to specify the name of the dataset and the directory where you want to attach it. Once the data mounting finishes, you can start using the files in your code.\n\nExample: Attaching the MNIST dataset [https://www.floydhub.com/mckay/datasets/mnist/1](https://www.floydhub.com/mckay/datasets/mnist/1)\n\n\n\nOnce you attach a dataset, it will be available even when you stop and resume the workspace.\n\n*Tip:* You can hide the panel on the right by clicking the *>>* arrow on the panel.\n\n#### Viewing attached datasets\n\nAll FloydHub datasets are attached under `/floyd/input`. You can view your attached datasets using the File Viewer panel. Make sure to click the *Home* icon and select the `data` dirctory.\n\n\n\nYou can also view them using the Terminal:\n\n```bash\nls /floyd/input\n```",
"_____no_output_____"
],
[
"<br><br>\n## Switching between CPU and GPU\n\nYou can easily switch the instance type of your workspace using the *Restart* button. \n\nFor example: if you are working on a Jupyter notebook in a CPU instance, you can switch to a GPU instance to speed up your training. To restart your workspace click the \"Restart\" button and select the new instance you want to use. \n\n\n\nYou can [purchase powerups](https://www.floydhub.com/settings/powerups) if you are low on runtime to start a workspace.\n\n*Save your files before shutdown*: Save all your notebooks and files before restarting the workspace. All running notebooks and scripts will be stopped during Restart.",
"_____no_output_____"
],
[
"<br><br>\n## Performance metrics\n\nYou can view the current load on the machine from the system metrics panel in the bottom bar. It shows the percentage load for various systems like CPU, RAM and GPU utilization.\n\n\n\n### Insights\n\nSometimes you will see a notification icon next to the metrics. To view the insight click on the icon to learn more.",
"_____no_output_____"
],
[
"## Idle Workspaces\n\nWhen your workspace is idle and not actively used for some duration of time, an alert will pop up on your screen notifying that FloydHub is planning to shutdown the workspace to save resources and your powerups. You can dismiss the shutdown option and continue working or change the timeout duration in the settings panel on the right.\n\n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e75254395adc0b4aaa204b8e97f40ac7aa865fc5 | 1,137 | ipynb | Jupyter Notebook | Jupyter/TensorFlow-Tutorial.ipynb | pynickle/awesome-python-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 21 | 2019-06-02T01:55:14.000Z | 2022-01-08T22:35:31.000Z | Jupyter/TensorFlow-Tutorial.ipynb | code-nick-python/daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 3 | 2019-06-02T01:55:17.000Z | 2019-06-14T12:32:06.000Z | Jupyter/TensorFlow-Tutorial.ipynb | code-nick-python/daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | [
"BSD-2-Clause"
] | 16 | 2019-06-23T13:00:04.000Z | 2021-09-18T06:09:58.000Z | 18.639344 | 53 | 0.474934 | [
[
[
"import tensorflow as tf\n\nsess = tf.Session()\na = tf.constant(10)\nb= tf.constant(12)\n\nx = tf.add(a, b, name=\"add\")\ny = tf.div(a, b, name=\"divide\")\nwith tf.Session() as sess:\n print(\"output: \", sess.run([a,b,x,y]))\n\nsess.close()",
"output: [10, 12, 22, 0]\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e75262ea548db2337c09c8b96212e7ddbb2d391c | 38,943 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Exercise - Solving Cart-Pole-checkpoint.ipynb | jpmaldonado/packt-rl | 2edeb903ab47fe4f364da5950c08dc85b4ba3e92 | [
"MIT"
] | 3 | 2018-08-23T19:20:49.000Z | 2019-07-14T09:11:21.000Z | .ipynb_checkpoints/Exercise - Solving Cart-Pole-checkpoint.ipynb | jpmaldonado/packt-rl | 2edeb903ab47fe4f364da5950c08dc85b4ba3e92 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Exercise - Solving Cart-Pole-checkpoint.ipynb | jpmaldonado/packt-rl | 2edeb903ab47fe4f364da5950c08dc85b4ba3e92 | [
"MIT"
] | 4 | 2018-08-23T13:55:39.000Z | 2020-12-08T12:05:11.000Z | 108.476323 | 7,828 | 0.853709 | [
[
[
"# Solving `CartPole`\n\n#### Your task:\n\n\nSolve the `CartPole` environment. Which algorithms could you use? As a warm-up, implement first SARSA or Q-Learning in `FrozenLake`. \n\nSome starter code is below. Note that if you want to use these algorithms for `CartPole` you need to discretize the state space somehow. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport gym\nimport math",
"_____no_output_____"
]
],
[
[
"### How could you know how to discretize?\n\nYou can try to sample some elements from the observation space (=state space). Then discretize based on that.",
"_____no_output_____"
]
],
[
[
"cp_env = gym.make('CartPole-v0')\ncp_obs = [cp_env.observation_space.sample() for _ in range(10000)]",
"\u001b[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.\u001b[0m\n"
],
[
"plt.hist([ob[0] for ob in cp_obs] )\nplt.title(\"Observation x\")",
"_____no_output_____"
],
[
"plt.hist([ob[1] for ob in cp_obs] )\nplt.title(\"Observation x_dot\")",
"_____no_output_____"
],
[
"plt.hist([ob[2] for ob in cp_obs] )\nplt.title(\"Observation theta\")",
"_____no_output_____"
],
[
"plt.hist([ob[3] for ob in cp_obs] )\nplt.title(\"Observation theta_dot\")",
"_____no_output_____"
]
],
[
[
"Then, we define some limit for the borders.",
"_____no_output_____"
]
],
[
[
"STATE_BOUNDS = list(zip(cp_env.observation_space.low, cp_env.observation_space.high))\nSTATE_BOUNDS[1] = [-0.5, 0.5]\nSTATE_BOUNDS[3] = [-math.radians(50), math.radians(50)]\nNUM_BUCKETS = (3,3,3,3) # state:n_bins pairs",
"_____no_output_____"
],
[
"def obs_to_state(obs):\n bucket_indice = []\n for i in range(len(obs)):\n if obs[i] <= STATE_BOUNDS[i][0]:\n bucket_index = 0\n elif obs[i] >= STATE_BOUNDS[i][1]:\n bucket_index = NUM_BUCKETS[i] - 1\n else:\n # Mapping the state bounds to the bucket array\n bound_width = STATE_BOUNDS[i][1] - STATE_BOUNDS[i][0]\n offset = (NUM_BUCKETS[i]-1)*STATE_BOUNDS[i][0]/bound_width\n scaling = (NUM_BUCKETS[i]-1)/bound_width\n bucket_index = int(round(scaling*obs[i] - offset))\n bucket_indice.append(bucket_index)\n return tuple(bucket_indice)",
"_____no_output_____"
],
[
"def epsilon_greedy_policy(Q, epsilon, actions):\n \"\"\" Q is a numpy array, epsilon between 0,1 \n and a list of actions\"\"\"\n \n def policy_fn(state):\n if np.random.rand()>epsilon and np.max(Q[state])>0: #avoid getting stuck in the initial step\n action = np.argmax(Q[state])\n else:\n action = np.random.choice(actions)\n return action\n return policy_fn\n\n\nenv = gym.make(\"CartPole-v0\")\nn_episodes = 2000\nalpha = 0.1\ngamma = 0.99\n\n# Initialization\nQ = np.zeros(NUM_BUCKETS+(env.action_space.n,))\n\nactions = range(env.action_space.n)\n\nscore = [] \nfor ep in range(n_episodes):\n done = False\n \n obs = env.reset()\n state = obs_to_state(obs)\n \n policy = epsilon_greedy_policy(Q, epsilon=100./(ep+1), actions = actions )\n \n \n ### Generate sample episode\n \n t = 0\n ep_reward = 0\n \n while not done:\n\n action = policy(state) \n new_obs, reward, done, _ = env.step(action)\n new_state = obs_to_state(new_obs)\n \n if done:\n Q[state+(action,)] = Q[state+(action,)] + alpha*(reward - Q[state+(action,)])\n else:\n Q[state+(action,)] = Q[state+(action,)] + alpha*(reward + gamma*np.max(Q[new_state]) - Q[state+(action,)])\n \n \n # Update rewards and state. Remember that CartPole is an on-going task!\n ep_reward += reward\n state = new_state \n \n if done:\n if len(score) < 100:\n score.append(ep_reward)\n else:\n score[ep % 100] = ep_reward\n \n if (ep+1) % 100 == 0:\n print(\"Number of episodes: {} . Average 100-episode reward: {}\".format(ep+1, np.mean(score)))\n ",
"\u001b[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.\u001b[0m\nNumber of episodes: 100 . Average 100-episode reward: 26.05\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e752668cb34e169031d1bf56412d3db3ca314581 | 94,702 | ipynb | Jupyter Notebook | example_notebooks/6_cornernet_lite/Train CornerNet-Squeeze.ipynb | jayeshk7/Monk_Object_Detection | 8395c718c299312ebb518a911cd8729629eba16b | [
"Apache-2.0"
] | 7 | 2020-09-16T06:05:51.000Z | 2021-04-07T12:05:21.000Z | example_notebooks/6_cornernet_lite/Train CornerNet-Squeeze.ipynb | jayeshk7/Monk_Object_Detection | 8395c718c299312ebb518a911cd8729629eba16b | [
"Apache-2.0"
] | null | null | null | example_notebooks/6_cornernet_lite/Train CornerNet-Squeeze.ipynb | jayeshk7/Monk_Object_Detection | 8395c718c299312ebb518a911cd8729629eba16b | [
"Apache-2.0"
] | null | null | null | 127.975676 | 42,801 | 0.807269 | [
[
[
"# Installation\n\n - Run these commands\n \n - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git\n \n - cd Monk_Object_Detection/6_cornernet_lite/installation\n \n - Select the right requirements file and run\n \n - chmod +x install.sh\n - ./install.sh",
"_____no_output_____"
],
[
"# About the network\n\n1. Paper on CornerNet: https://arxiv.org/abs/1808.01244\n\n2. Paper on CornerNet-Lite: https://arxiv.org/abs/1904.08900\n\n3. Blog 1 on CornerNet: https://joshua19881228.github.io/2019-01-20-CornerNet/\n\n4. Blog 2 on CornerNet: https://zhangtemplar.github.io/anchor-free-detection/\n\n5. Blog 3 on CornerNet: https://opencv.org/latest-trends-of-object-detection-from-cornernet-to-centernet-explained-part-i-cornernet/\n\n6. Blog 4 on CornerNet: https://towardsdatascience.com/centernet-keypoint-triplets-for-object-detection-review-a314a8e4d4b0\n\n7. Blog 5 on CornerNet: https://medium.com/@andersasac/the-end-of-anchors-improving-object-detection-models-and-annotations-73828c7b39f6",
"_____no_output_____"
],
[
"# COCO Format - 1\n\n## Dataset Directory Structure\n\n ../sample_dataset (root_dir)\n |\n |------ship (coco_dir) \n | |\n | |----images (img_dir)\n | |\n | |------Train (set_dir) (Train)\n | |\n | |---------img1.jpg\n | |---------img2.jpg\n | |---------..........(and so on)\n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_Train.json (instances_<set_dir>.json)\n | |--------------------classes.txt\n \n \n - instances_Train.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n\nFor TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"ship\";\n - img_dir = \"images\";\n - set_dir = \"Train\";\n\n \n Note: Annotation file name too coincides against the set_dir",
"_____no_output_____"
],
[
"# COCO Format - 2\n\n## Dataset Directory Structure\n\n ../sample_dataset (root_dir)\n |\n |------ship (coco_dir) \n | |\n | |---ImagesTrain (set_dir)\n | |----|\n | |-------------------img1.jpg\n | |-------------------img2.jpg\n | |-------------------.........(and so on)\n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_ImagesTrain.json (instances_<set_dir>.json)\n | |--------------------classes.txt\n \n \n - instances_Train.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n For TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"ship\";\n - img_dir = \"./\";\n - set_dir = \"ImagesTrain\";\n\n \n Note: Annotation file name too coincides against the set_dir\n ",
"_____no_output_____"
],
[
"# Sample Dataset Credits\n\n credits: https://github.com/experiencor/kangaroo",
"_____no_output_____"
]
],
[
[
"import os\nimport sys",
"_____no_output_____"
],
[
"sys.path.append(\"../../6_cornernet_lite/lib/\")",
"_____no_output_____"
],
[
"from train_detector import Detector",
"_____no_output_____"
],
[
"gtf = Detector();",
"_____no_output_____"
],
[
"root_dir = \"../sample_dataset\";\ncoco_dir = \"kangaroo\"\nimg_dir = \"/\"\nset_dir = \"Images\"",
"_____no_output_____"
],
[
"gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=4, use_gpu=True, num_workers=4)",
"_____no_output_____"
],
[
"gtf.Model(model_name=\"CornerNet_Squeeze\")",
"_____no_output_____"
],
[
"gtf.Hyper_Params(lr=0.00025, total_iterations=1000)",
"_____no_output_____"
],
[
"gtf.Setup();",
"loading annotations into memory...\nDone (t=0.00s)\ncreating index...\nindex created!\nloading annotations into memory...\nDone (t=0.00s)\ncreating index...\nindex created!\nloading annotations into memory...\nDone (t=0.00s)\ncreating index...\nindex created!\nloading annotations into memory...\nDone (t=0.00s)\ncreating index...\nindex created!\nLoading Model - core.models.CornerNet_Squeeze\nModel Loaded\n"
],
[
"gtf.Train();",
"start_iter = 0\ndistributed = False\nworld_size = 0\ninitialize = False\nbatch_size = 4\nlearning_rate = 0.00025\nmax_iteration = 1000\nstepsize = 800\nsnapshot = 500\nval_iter = 500\ndisplay = 100\ndecay_rate = 10\nProcess 0: building model...\ntotal parameters: 31690640\nstart prefetching data...\nshuffling indices...\nstart prefetching data...\nstart prefetching data...\nshuffling indices...\nshuffling indices...\nsetting learning rate to: 0.00025\ntraining start...\n 0%| | 0/1000 [00:00<?, ?it/s]start prefetching data...\nshuffling indices...\nProcess 0: training loss at iteration 100: 5.108389377593994 \n 15%|█████▉ | 148/1000 [01:16<07:22, 1.92it/s]shuffling indices...\n 15%|██████ | 150/1000 [01:18<07:22, 1.92it/s]shuffling indices...\n 15%|██████ | 151/1000 [01:18<07:21, 1.92it/s]shuffling indices...\n 15%|██████ | 153/1000 [01:19<07:23, 1.91it/s]shuffling indices...\nProcess 0: training loss at iteration 200: 5.436167240142822 \nProcess 0: training loss at iteration 300: 4.454023361206055 \n 31%|████████████▍ | 312/1000 [02:42<05:58, 1.92it/s]shuffling indices...\n 31%|████████████▌ | 314/1000 [02:43<05:59, 1.91it/s]shuffling indices...\n 32%|████████████▌ | 315/1000 [02:43<05:58, 1.91it/s]shuffling indices...\n 32%|████████████▋ | 317/1000 [02:44<05:59, 1.90it/s]shuffling indices...\nProcess 0: training loss at iteration 400: 4.5771660804748535 \n 48%|███████████████████ | 476/1000 [04:07<04:50, 1.80it/s]shuffling indices...\n 48%|███████████████████ | 478/1000 [04:09<04:48, 1.81it/s]shuffling indices...\n 48%|███████████████████▏ | 479/1000 [04:09<04:44, 1.83it/s]shuffling indices...\n 48%|███████████████████▏ | 481/1000 [04:10<04:38, 1.87it/s]shuffling indices...\nProcess 0: training loss at iteration 500: 3.77142333984375 \nsaving model to ./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_intermediate.pkl\nProcess 0: training loss at iteration 600: 3.9744179248809814 \n 64%|█████████████████████████▌ | 640/1000 [05:36<03:09, 1.90it/s]shuffling indices...\n 64%|█████████████████████████▋ | 642/1000 [05:37<03:07, 1.91it/s]shuffling indices...\n 64%|█████████████████████████▋ | 643/1000 [05:38<03:06, 1.91it/s]shuffling indices...\n 64%|█████████████████████████▊ | 645/1000 [05:39<03:05, 1.92it/s]shuffling indices...\nProcess 0: training loss at iteration 700: 4.035816669464111 \nProcess 0: training loss at iteration 800: 3.424349784851074 \nsetting learning rate to: 2.5e-05 \n 80%|████████████████████████████████▏ | 804/1000 [07:03<01:42, 1.92it/s]shuffling indices...\n 81%|████████████████████████████████▏ | 806/1000 [07:04<01:42, 1.90it/s]shuffling indices...\n 81%|████████████████████████████████▎ | 807/1000 [07:05<01:41, 1.90it/s]shuffling indices...\n 81%|████████████████████████████████▎ | 809/1000 [07:06<01:40, 1.90it/s]shuffling indices...\nProcess 0: training loss at iteration 900: 3.235015869140625 \n 97%|██████████████████████████████████████▋ | 968/1000 [08:29<00:16, 1.91it/s]shuffling indices...\n 97%|██████████████████████████████████████▊ | 970/1000 [08:30<00:15, 1.90it/s]shuffling indices...\n 97%|██████████████████████████████████████▊ | 971/1000 [08:31<00:15, 1.90it/s]shuffling indices...\n 97%|██████████████████████████████████████▉ | 973/1000 [08:32<00:14, 1.90it/s]shuffling indices...\nProcess 0: training loss at iteration 1000: 3.520153760910034 \nsaving model to ./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_intermediate.pkl\n100%|███████████████████████████████████████| 1000/1000 [08:46<00:00, 1.90it/s]\nsaving model to 
./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_final.pkl\n"
]
],
[
[
"# Inference",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append(\"../../6_cornernet_lite/lib/\")",
"_____no_output_____"
],
[
"from infer_detector import Infer",
"_____no_output_____"
],
[
"gtf = Infer();",
"_____no_output_____"
],
[
"class_list = [\"kangaroo\"]\ngtf.Model(class_list, \n base=\"CornerNet_Squeeze\", \n model_path=\"./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_final.pkl\")",
"total parameters: 31690640\nloading from ./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_final.pkl\n"
],
[
"boxes = gtf.Predict(\"../sample_dataset/kangaroo/test/kg1.jpeg\", vis_thresh=0.2, output_img=\"output.jpg\")\nfrom IPython.display import Image\nImage(filename='output.jpg')",
"_____no_output_____"
],
[
"boxes = gtf.Predict(\"../sample_dataset/kangaroo/test/kg3.jpeg\", vis_thresh=0.23, output_img=\"output.jpg\")\nfrom IPython.display import Image\nImage(filename='output.jpg')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e752694ae82335706b617bc1cfd1530c7547eb60 | 2,266 | ipynb | Jupyter Notebook | test/Ai2.ipynb | awesome-archive/NeuralVerification.jl | ea68308ca1594787f508cde583e1e34b4b3a25db | [
"MIT"
] | 2 | 2019-07-06T04:43:24.000Z | 2019-07-20T01:32:27.000Z | test/Ai2.ipynb | awesome-archive/NeuralVerification.jl | ea68308ca1594787f508cde583e1e34b4b3a25db | [
"MIT"
] | null | null | null | test/Ai2.ipynb | awesome-archive/NeuralVerification.jl | ea68308ca1594787f508cde583e1e34b4b3a25db | [
"MIT"
] | 1 | 2019-09-02T00:41:32.000Z | 2019-09-02T00:41:32.000Z | 24.106383 | 366 | 0.552957 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e752792922851d7dd41c9cdb52d30c2d1efe2871 | 123,593 | ipynb | Jupyter Notebook | tutorials/notebooks/python/dataframes/df_basics.ipynb | TileDB-Inc/TileDB-Examples | dfcdbcaddc54774b07a9aa8a947520776e0d3759 | [
"MIT"
] | 12 | 2021-02-09T03:01:46.000Z | 2022-03-15T03:39:31.000Z | tutorials/notebooks/python/dataframes/df_basics.ipynb | TileDB-Inc/TileDB-Cloud-Example-Notebooks | bb47149e98e3dc565364027b0812f9381bd77f03 | [
"MIT"
] | 6 | 2021-01-15T22:15:08.000Z | 2022-03-11T23:41:58.000Z | tutorials/notebooks/python/dataframes/df_basics.ipynb | TileDB-Inc/TileDB-Cloud-Example-Notebooks | bb47149e98e3dc565364027b0812f9381bd77f03 | [
"MIT"
] | 3 | 2020-06-05T13:41:47.000Z | 2020-12-23T07:39:08.000Z | 35.352689 | 791 | 0.401244 | [
[
[
"# Dataframes: The Basics",
"_____no_output_____"
],
[
"This tutorial will cover the following topics:\n\n* Storing a dataframe as a TileDB 1D dense array to allow fast (out-of-core) slicing on rows\n* Storing a dataframe as a TileDB ND sparse array to allow fast (out-of-core) execution of column range predicates\n* Interoperating with Pandas and [Apache Arrow](https://arrow.apache.org/)\n* Fast subselection on columns\n* Running SQL queries on the stored dataframes\n* Measuring performance in TileDB\n* Running on different storage backends\n* Some basic virtual file system (VFS) operations with TileDB\n\n[TileDB can model dataframes](https://docs.tiledb.com/main/basic-concepts/data-model#dataframes-can-be-modeled-as-dense-or-sparse-arrays) either as **dense** or **sparse arrays**. Storing a dataframe as a (1D) dense array allows for rapid slicing on row indices. On the other hand, storing the dataframe as a ND sparse array, specifying any subset of the columns to act as the *dimensions*, allows for rapid slicing on range predicates on those column dimensions.\n\nIn either case and in addition to the slicing predicate, TileDB allows for very fast subselection of columns. This is because it implements a \"columnar\" format and, therefore, it fetches from persistent storage only data from the requested columns.\n\nThis notebook was run on a **2.3 GHz Intel Core i9, 8 cores, 16GB RAM, running MacOS Mojave**.",
"_____no_output_____"
],
[
"## Getting Started\n\n### Dataset\n\nWe will use the [NYC Taxi Trip dataset](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) and specifically the **yellow taxi trip records** which has [this schema](https://www1.nyc.gov/assets/tlc/downloads/pdf/data_dictionary_trip_records_yellow.pdf). ",
"_____no_output_____"
],
[
"We will focus on ingesting the data from [January 2020](https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2020-01.csv), namely file `yellow_tripdata_2020-01.csv`. The file is about 560MB.",
"_____no_output_____"
]
],
[
[
"!wget https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2020-01.csv",
"_____no_output_____"
],
[
"!ls -alh yellow_tripdata_2020-01.csv",
"-rw-rw---- 1 stavros staff 566M Jul 30 00:07 yellow_tripdata_2020-01.csv\n"
]
],
[
[
"### Installation\n\nYou need to install [TileDB-Py](https://github.com/TileDB-Inc/TileDB-Py), the Python wrapper of [TileDB Embedded](https://github.com/TileDB-Inc/TileDB), as follows:",
"_____no_output_____"
],
[
"```bash\n# Pip:\n$ pip install tiledb\n\n# Or Conda:\n$ conda install -c conda-forge tiledb-py\n```",
"_____no_output_____"
],
[
"The notebook was run using **Pandas 1.1.0**.",
"_____no_output_____"
],
[
"Note that the TileDB core is a C++ library. To boost performance when integrating with pandas, we use Apache Arrow to achieve zero-copy when returning results from TileDB into pandas dataframes. You need to **install pyarrow** to take advantage of this optimization:\n\n```bash\n# Pip:\n$ pip install pyarrow\n\n# Or Conda:\n$ conda install -c conda-forge pyarrow\n```",
"_____no_output_____"
],
[
"### Setup\n\nImport TileDB and check the versions of the C++ core and TileDB-Py respectively.",
"_____no_output_____"
]
],
[
[
"import tiledb, numpy as np",
"_____no_output_____"
],
[
"# Version of TileDB core (C++ library)\ntiledb.libtiledb.version()",
"_____no_output_____"
],
[
"# Version of TileDB-Py (Python wrapper)\ntiledb.__version__",
"_____no_output_____"
]
],
[
[
"Before we start, we create the TileDB context passing a **configuration parameter** around memory allocation during read queries that will be explained in a later tutorial. That needs to be set at the *very beginning* of the code and before any other TileDB function is called.",
"_____no_output_____"
]
],
[
[
"cfg = tiledb.Ctx().config()\ncfg.update(\n {\n 'py.init_buffer_bytes': 1024**2 * 50\n }\n)\ntiledb.default_ctx(cfg)",
"_____no_output_____"
]
],
[
[
"We also enable the TileDB **stats** so that we can get some insight into performance.",
"_____no_output_____"
]
],
[
[
"tiledb.stats_enable()",
"_____no_output_____"
]
],
[
[
"## The Dense Case",
"_____no_output_____"
],
[
"We ingest the `yellow_tripdata_2020-01.csv` CSV file into a TileDB dense array as shown below. The command takes the taxi CSV file and ingests it into a 1D dense array called `taxi_dense_array`. It sets the tile extent to 100K, which means that groups of 100K rows each across every column will comprise the atomic unit of compression and IO (i.e., a [data tile](https://docs.tiledb.com/main/basic-concepts/terminology#data-tile)). Two of the columns (`tpep_dropoff_datetime` and `tpep_pickup_datetime`) are dates, so we make sure to parse them as such. Finally, one of the columns (`store_and_fwd_flag`) may have nulls, so we explicitly set some null value.",
"_____no_output_____"
]
],
[
[
"%%time\ntiledb.stats_reset()\ntiledb.from_csv(\"taxi_dense_array\", \"yellow_tripdata_2020-01.csv\", \n tile = 100000, \n parse_dates=['tpep_dropoff_datetime', 'tpep_pickup_datetime'], \n fillna={'store_and_fwd_flag': ''})\ntiledb.stats_dump()",
"/opt/miniconda3/envs/tiledb/lib/python3.8/site-packages/IPython/core/magic.py:187: DtypeWarning: Columns (6) have mixed types.Specify dtype option on import or set low_memory=False.\n call = lambda f, *a, **k: f(*a, **k)\n"
]
],
[
[
"From the stats, the actual write time in TileDB took under 1 second (the rest was mostly parsing the CSV in Pandas). The raw uncompressed CSV data was about 870 MB in binary format, and those got compressed down to about 131 MB in TileDB. There are 18 columns written as attributes, one of which is var-sized (of string type, as we will see in the schema below).",
"_____no_output_____"
],
[
"Next, let's open the written array and inspect the TileDB schema.",
"_____no_output_____"
]
],
[
[
"A = tiledb.open(\"taxi_dense_array\")\nprint(A.schema)",
"ArraySchema(\n domain=Domain(*[\n Dim(name='__tiledb_rows', domain=(0, 6405007), tile=100000, dtype='uint64'),\n ]),\n attrs=[\n Attr(name='VendorID', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tpep_pickup_datetime', dtype='datetime64[ns]', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tpep_dropoff_datetime', dtype='datetime64[ns]', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='passenger_count', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='trip_distance', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='RatecodeID', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='store_and_fwd_flag', dtype='<U0', var=True, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='PULocationID', dtype='int64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='DOLocationID', dtype='int64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='payment_type', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='fare_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='extra', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='mta_tax', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tip_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tolls_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='improvement_surcharge', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='total_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='congestion_surcharge', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n ],\n cell_order='row-major',\n tile_order='row-major',\n capacity=10000,\n sparse=False,\n coords_filters=FilterList([ZstdFilter(level=-1), ])\n)\n\n"
]
],
[
[
"That shows the 18 columns being stored as attributes, along with their types and filters (e.g., zstd compression, which is the default). There is a single dimension `__tiledb_rows`, which corresponds to the row indices. This essentially means that you will be able to slice fast across the row indices. ",
"_____no_output_____"
],
[
"In order to see the number of rows ingested into this array, you can use the non-empty domain. The range below is inclusive and states that there are 6,405,008 rows in the array.",
"_____no_output_____"
]
],
[
[
"print(A.nonempty_domain())",
"((array(0, dtype=uint64), array(6405007, dtype=uint64)),)\n"
]
],
[
[
"Let's reset the stats and perform a **full read** of the array (all rows and all columns). The result is stored directly in a pandas dataframe. Note that ranges with `df` are always *inclusive*.",
"_____no_output_____"
]
],
[
[
"%%time\ntiledb.stats_reset()\ndf = A.df[0:6405007]\ndf",
"CPU times: user 3.14 s, sys: 5.12 s, total: 8.26 s\nWall time: 1.59 s\n"
],
[
"tiledb.stats_dump()",
"TileDB Embedded Version: (2, 1, 3)\nTileDB-Py Version: 0.7.4\n==== READ ====\n\n- Number of read queries: 1\n- Number of attempts until results are found: 1\n\n- Number of attributes read: 18\n * Number of fixed-sized attributes read: 17\n * Number of var-sized attributes read: 1\n- Number of dimensions read: 1\n * Number of fixed-sized dimensions read: 1\n\n- Number of logical tiles overlapping the query: 65\n- Number of physical tiles read: 1300\n * Number of physical fixed-sized tiles read: 1170\n * Number of physical var-sized tiles read: 130\n- Number of cells read: 6500000\n- Number of result cells: 6405008\n- Percentage of useful cells read: 98.5386%\n\n- Number of bytes read: 141109657 bytes (0.131419 GB) \n- Number of read operations: 87\n- Number of bytes unfiltered: 942510623 bytes (0.877781 GB) \n- Unfiltering inflation factor: 6.67928x\n\n- Time to compute estimated result size: 0.00081386 secs\n * Time to compute tile overlap: 0.000311342 secs\n > Time to compute relevant fragments: 1.8264e-05 secs\n > Time to load relevant fragment R-trees: 0.000177244 secs\n > Time to compute relevant fragment tile overlap: 0.000106599 secs\n\n- Total metadata read: 10568 bytes (9.84222e-06 GB) \n * R-tree: 8 bytes (7.45058e-09 GB) \n * Fixed-sized tile offsets: 9504 bytes (8.85129e-06 GB) \n * Var-sized tile offsets: 528 bytes (4.91738e-07 GB) \n * Var-sized tile sizes: 528 bytes (4.91738e-07 GB) \n\n- Time to load array metadata: 0.00107096 secs\n * Array metadata size: 55 bytes (5.12227e-08 GB) \n\n- Time to initialize the read state: 6.5081e-05 secs\n\n- Read time: 0.853632 secs\n * Time to compute next partition: 0.000322538 secs\n * Time to compute tile coordinates: 3.8468e-05 secs\n * Time to compute result coordinates: 8.112e-06 secs\n > Time to compute sparse result tiles: 4.263e-06 secs\n * Time to compute dense result cell slabs: 0.000260793 secs\n * Time to copy result attribute values: 0.838509 secs\n > Time to read attribute tiles: 0.132942 secs\n > Time to unfilter attribute tiles: 0.209192 secs\n > Time to copy fixed-sized attribute values: 0.352938 secs\n > Time to copy var-sized attribute values: 0.0787757 secs\n * Time to fill dense coordinates: 0.0144067 secs\n\n- Total read query time (array open + init state + read): 0.853698 secs\n==== Python Stats ====\n\n- TileDB-Py Indexing Time: 1.49114\n * TileDB-Py query execution time: 0.861424\n > TileDB C++ Core initial query submit time: 0.853751\n * TileDB-Py buffer conversion time: 0.627318\n\n"
]
],
[
[
"This operation fetches the entire array / dataframe from the disk, decompresses all tiles and creates a pandas dataframe with the result. The whole process takes 1.2 seconds in TileDB core (C++) and about 0.7 seconds on the Python wrapper side for buffer conversion. ",
"_____no_output_____"
],
[
"The stats are quite informative. They break down how long it took to read from storage and unfilter (i.e., decompress), how many cells were fetched, what is the percentage of useful results, etc. ",
"_____no_output_____"
],
[
"However, note that you do not need to read the entire dataframe in main memory in order to process it. You can efficiently slice any subset of rows directly from storage as follows. TileDB makes very lightweight use of main memory to process the result. Note that `df[]` works with [mulit-index semantics](https://docs.tiledb.com/main/solutions/tiledb-embedded/api-usage/reading-arrays/multi-range-subarrays) and thus can take [multi-range subarrays](https://docs.tiledb.com/main/basic-concepts/terminology#subarray) as well.",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.df[0:999]\ndf",
"CPU times: user 19.1 ms, sys: 137 ms, total: 156 ms\nWall time: 74.2 ms\n"
]
],
[
[
"Notice how much faster that operation was, taking only a few milliseconds.",
"_____no_output_____"
],
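[
"Since `df[]` follows multi-index semantics, a multi-range slice is also possible. This is only a sketch (the two row ranges below are arbitrary examples):\n\n```python\n# Two disjoint row ranges fetched in a single multi-range query\ndf = A.df[[slice(0, 999), slice(5000, 5999)]]\n```",
"_____no_output_____"
],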
[
"Finally, you can slice any **subset of columns**, without fetching all the columns first in a pandas dataframe. ",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(attrs=['tpep_dropoff_datetime', 'fare_amount']).df[0:6405007]\ndf",
"CPU times: user 423 ms, sys: 614 ms, total: 1.04 s\nWall time: 176 ms\n"
]
],
[
[
"Once again, that operation was much faster than fetching the entire dataframe in main memory. The stats also inform you about how many attributes (i.e., columns) were retrieved, which is two in this example.",
"_____no_output_____"
],
[
"Remember to close the array when you are done.",
"_____no_output_____"
]
],
[
[
"A.close()",
"_____no_output_____"
]
],
[
[
"## The Sparse Case",
"_____no_output_____"
],
[
"Storing the dataframe as a 1D dense array allowed us to rapidly slice on row indexes. *But what if we wished to slice fast on predicates applied to column values*, such as dropoff time and fare amount? For such scenarios and if you know for a fact that the majority of your workloads involve applying a range (or equality) predicate on a specific subset of columns, you can create a sparse array with those columns set as the dimensions. \n\nThis can be done as follows. Instead of the `tile` argument we used in dense arrays, we use `capacity` to determine how many rows to group in a data tile (read about [the difference between dense and sparse data tiles](https://docs.tiledb.com/main/basic-concepts/terminology#data-tile)). Also `index_col` determines which columns will act as dimensions.",
"_____no_output_____"
]
],
[
[
"%%time\ntiledb.stats_reset()\ntiledb.from_csv(\"taxi_sparse_array\", \"yellow_tripdata_2020-01.csv\", \n capacity=100000, \n sparse=True, \n index_col=['tpep_dropoff_datetime', 'fare_amount'], \n parse_dates=['tpep_dropoff_datetime', 'tpep_pickup_datetime'], \n fillna={'store_and_fwd_flag': ''})\ntiledb.stats_dump()",
"/opt/miniconda3/envs/tiledb/lib/python3.8/site-packages/IPython/core/magic.py:187: DtypeWarning: Columns (6) have mixed types.Specify dtype option on import or set low_memory=False.\n call = lambda f, *a, **k: f(*a, **k)\n/opt/miniconda3/envs/tiledb/lib/python3.8/site-packages/numpy/lib/arraysetops.py:580: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n mask |= (ar1 == a)\n"
]
],
[
[
"Once again, most of the total ingestion time is spent on parsing on the pandas side. Notice that the R-tree (which is 2D) this time is slightly larger, as this is the main indexing method is sparse arrays. It is still tiny though relative to the entire array size, which is ~100MB. ",
"_____no_output_____"
],
[
"Note that you can choose **any** subset of columns as the dimensions (any number with different types, even strings).",
"_____no_output_____"
],
[
"Let's open the array and print the schema.",
"_____no_output_____"
]
],
[
[
"A = tiledb.open(\"taxi_sparse_array\")\nprint(A.schema)",
"ArraySchema(\n domain=Domain(*[\n Dim(name='tpep_dropoff_datetime', domain=(numpy.datetime64('2003-01-01T14:16:59.000000000'), numpy.datetime64('2021-01-02T01:25:01.000000000')), tile=1000 nanoseconds, dtype='datetime64[ns]'),\n Dim(name='fare_amount', domain=(-1238.0, 4265.0), tile=1000.0, dtype='float64'),\n ]),\n attrs=[\n Attr(name='VendorID', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tpep_pickup_datetime', dtype='datetime64[ns]', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='passenger_count', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='trip_distance', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='RatecodeID', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='store_and_fwd_flag', dtype='<U0', var=True, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='PULocationID', dtype='int64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='DOLocationID', dtype='int64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='payment_type', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='extra', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='mta_tax', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tip_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='tolls_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='improvement_surcharge', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='total_amount', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n Attr(name='congestion_surcharge', dtype='float64', var=False, filters=FilterList([ZstdFilter(level=1), ])),\n ],\n cell_order='row-major',\n tile_order='row-major',\n capacity=100000,\n sparse=True,\n allows_duplicates=True,\n coords_filters=FilterList([ZstdFilter(level=-1), ])\n)\n\n"
]
],
[
[
"Observe that now the array is sparse, having 16 attributes and 2 dimensions. Also notice that, by default, the array **allows duplicates**. This can be turned off by passing `allows_duplicates=False` in `from_csv`, which will return an error if the CSV contains rows with identical coordinates along the array dimensions.",
"_____no_output_____"
],
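[
"For instance, a hypothetical re-ingestion into a new array (the name `taxi_sparse_array_nodup` is just illustrative) that disallows duplicate coordinates could look like this:\n\n```python\n# Note: this errors out if the CSV contains rows with identical\n# (tpep_dropoff_datetime, fare_amount) coordinates\ntiledb.from_csv(\"taxi_sparse_array_nodup\", \"yellow_tripdata_2020-01.csv\",\n                capacity=100000,\n                sparse=True,\n                index_col=['tpep_dropoff_datetime', 'fare_amount'],\n                parse_dates=['tpep_dropoff_datetime', 'tpep_pickup_datetime'],\n                fillna={'store_and_fwd_flag': ''},\n                allows_duplicates=False)\n```",
"_____no_output_____"
],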
[
"Let's print the non-empty domain for the sparse array.",
"_____no_output_____"
]
],
[
[
"A.nonempty_domain()",
"_____no_output_____"
]
],
[
[
"The first range corresponds to `tpep_dropoff_datetime` and the second to `fare_amount`. ",
"_____no_output_____"
],
[
"Now let's slice the whole array into a pandas dataframe.",
"_____no_output_____"
]
],
[
[
"%%time\ntiledb.stats_reset()\ndf = A.query().df[:]\ndf",
"CPU times: user 4.36 s, sys: 6.17 s, total: 10.5 s\nWall time: 3.05 s\n"
],
[
"tiledb.stats_dump()",
"TileDB Embedded Version: (2, 1, 3)\nTileDB-Py Version: 0.7.4\n==== READ ====\n\n- Number of read queries: 1\n- Number of attempts until results are found: 1\n\n- Number of attributes read: 16\n * Number of fixed-sized attributes read: 15\n * Number of var-sized attributes read: 1\n- Number of dimensions read: 2\n * Number of fixed-sized dimensions read: 2\n\n- Number of logical tiles overlapping the query: 65\n- Number of physical tiles read: 1235\n * Number of physical fixed-sized tiles read: 1105\n * Number of physical var-sized tiles read: 130\n- Number of cells read: 6405008\n- Number of result cells: 6405008\n- Percentage of useful cells read: 100%\n\n- Number of bytes read: 117472852 bytes (0.109405 GB) \n- Number of read operations: 86\n- Number of bytes unfiltered: 928739186 bytes (0.864956 GB) \n- Unfiltering inflation factor: 7.90599x\n\n- Time to compute estimated result size: 0.000917319 secs\n * Time to compute tile overlap: 0.000401086 secs\n > Time to compute relevant fragments: 1.2216e-05 secs\n > Time to load relevant fragment R-trees: 0.00025591 secs\n > Time to compute relevant fragment tile overlap: 0.000130426 secs\n\n- Total metadata read: 12928 bytes (1.20401e-05 GB) \n * R-tree: 2368 bytes (2.20537e-06 GB) \n * Fixed-sized tile offsets: 9504 bytes (8.85129e-06 GB) \n * Var-sized tile offsets: 528 bytes (4.91738e-07 GB) \n * Var-sized tile sizes: 528 bytes (4.91738e-07 GB) \n\n- Time to load array metadata: 0.000244082 secs\n * Array metadata size: 98 bytes (9.12696e-08 GB) \n\n- Time to initialize the read state: 6.9947e-05 secs\n\n- Read time: 1.36381 secs\n * Time to compute next partition: 0.000467399 secs\n * Time to compute result coordinates: 0.537777 secs\n > Time to compute sparse result tiles: 0.000178318 secs\n > Time to read coordinate tiles: 0.0154381 secs\n > Time to unfilter coordinate tiles: 0.0532034 secs\n > Time to compute range result coordinates: 0.319006 secs\n * Time to compute sparse result cell slabs: 0.0198344 secs\n * Time to copy result attribute values: 0.749709 secs\n > Time to read attribute tiles: 0.0899356 secs\n > Time to unfilter attribute tiles: 0.168391 secs\n > Time to copy fixed-sized attribute values: 0.352634 secs\n > Time to copy var-sized attribute values: 0.0858673 secs\n * Time to copy result coordinates: 0.0470755 secs\n > Time to copy fixed-sized coordinates: 0.0293159 secs\n\n- Total read query time (array open + init state + read): 1.36388 secs\n==== Python Stats ====\n\n- TileDB-Py Indexing Time: 2.94222\n * TileDB-Py query execution time: 1.37205\n > TileDB C++ Core initial query submit time: 1.36393\n * TileDB-Py buffer conversion time: 1.56823\n\n"
]
],
[
[
"Notice that this takes longer than the dense case. This is because the sparse case involves more advanced indexing and copying operations than dense. However, the real benefit of sparse dataframe modeling is the ability to **slice rapidly with range conditions on the indexed dimensions**, without having to fetch the entire dataframe in main memory.",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.df[np.datetime64(\"2020-07-01\"):np.datetime64(\"2020-10-01\"), 5.5:12.5]\ndf",
"CPU times: user 14.7 ms, sys: 83.8 ms, total: 98.4 ms\nWall time: 92.2 ms\n"
]
],
[
[
"This is truly rapid. In the dense case, you would have to load the whole dataframe in main memory and then slice using pandas.",
"_____no_output_____"
],
[
"You can subset on attributes as follows.",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(attrs=['trip_distance']).df[:]\ndf",
"CPU times: user 1.65 s, sys: 798 ms, total: 2.45 s\nWall time: 1.61 s\n"
]
],
[
[
"By default, TileDB fetches also the coordinate values and sets them as pandas indices. To disable them, you can run:",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(dims=False, attrs=['trip_distance']).df[:]\ndf",
"CPU times: user 787 ms, sys: 533 ms, total: 1.32 s\nWall time: 655 ms\n"
]
],
[
[
"Wer can also subselect on dimensions:",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(dims=['tpep_dropoff_datetime'], attrs=['trip_distance']).df[:]\ndf",
"CPU times: user 822 ms, sys: 690 ms, total: 1.51 s\nWall time: 662 ms\n"
]
],
[
[
"Finally, you can choose even attributes to act as dataframe indices using the `index_col` argument.",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(index_col=['trip_distance'], attrs=['passenger_count', 'trip_distance']).df[:]\ndf",
"CPU times: user 1.02 s, sys: 1.3 s, total: 2.32 s\nWall time: 811 ms\n"
]
],
[
[
"For convenience, TileDB can also return dataframe results as an **Arrow Table** as follows:",
"_____no_output_____"
]
],
[
[
"%%time\ndf = A.query(return_arrow=True, index_col=['trip_distance'], attrs=['passenger_count', 'trip_distance']).df[:]\ndf",
"CPU times: user 1 s, sys: 972 ms, total: 1.97 s\nWall time: 742 ms\n"
]
],
[
[
"Since we are done, we can close the array.",
"_____no_output_____"
]
],
[
[
"A.close()",
"_____no_output_____"
]
],
[
[
"## Storing Pandas Dataframes in TileDB Arrays",
"_____no_output_____"
],
[
"You can also store a pandas dataframe you already created in main memory into a TileDB array. The following will create a new TileDB array and write the contents of a pandas dataframe.",
"_____no_output_____"
]
],
[
[
"# First read some data into a pandas dataframe\nA = tiledb.open(\"taxi_sparse_array\")\ndf = A.query(attrs=['passenger_count', 'trip_distance']).df[:]\ndf\n\n# Create and write into a TileDB array\ntiledb.from_pandas(\"sliced_taxi_sparse_array\", df)",
"_____no_output_____"
]
],
[
[
"Let's inspect the schema.",
"_____no_output_____"
]
],
[
[
"A2 = tiledb.open(\"sliced_taxi_sparse_array\")\nA2.schema",
"_____no_output_____"
]
],
[
[
"Reading the array back:",
"_____no_output_____"
]
],
[
[
"A2.df[:]",
"_____no_output_____"
]
],
[
[
"Lastly, we close the opened arrays.",
"_____no_output_____"
]
],
[
[
"A.close()\nA2.close()",
"_____no_output_____"
]
],
[
[
"## Running SQL Queries",
"_____no_output_____"
],
[
"One of the cool things about TileDB is that it offers a powerful integration with [embedded MariaDB](https://docs.tiledb.com/main/solutions/tiledb-embedded/api-usage/embedded-sql). This allows for execution of arbitrary SQL queries directly on TileDB arrays (both dense and sparse). We took appropriate care to push the fast slicing and attribute subsetting portions of the query down to TileDB, leaving the rest of the SQL execution to MariaDB. In other words, we made MariaDB take advantage of the multi-dimensional indexing and columnar format of TileDB!",
"_____no_output_____"
],
[
"To install this capability, run:\n```bash\nconda install -c conda-forge libtiledb-sql-py\n```",
"_____no_output_____"
],
[
"The usage is very simple and intuitive. All results are retunred directly as pandas dataframes.",
"_____no_output_____"
]
],
[
[
"import tiledb.sql, pandas as pd",
"_____no_output_____"
],
[
"db = tiledb.sql.connect()",
"_____no_output_____"
],
[
"%%time\npd.read_sql(sql=\"SELECT AVG(trip_distance) FROM taxi_dense_array WHERE __tiledb_rows >= 0 AND __tiledb_rows <1000\", con=db)",
"CPU times: user 10.5 ms, sys: 21.6 ms, total: 32.1 ms\nWall time: 24.7 ms\n"
],
[
"%%time\npd.read_sql(sql=\"SELECT AVG(trip_distance) FROM taxi_sparse_array WHERE tpep_dropoff_datetime <= '2019-07-31' AND fare_amount < 5.5\", con=db)",
"CPU times: user 14.4 ms, sys: 106 ms, total: 121 ms\nWall time: 47.6 ms\n"
]
],
[
[
"## Other backends",
"_____no_output_____"
],
[
"So far we have explained how to store TileDB arrays to the local disk. TileDB is optimized for [numerous storage backends](https://docs.tiledb.com/main/solutions/tiledb-embedded/backends), including AWS S3, Azure Blob Storage and more. The entire functionality shown above (including SQL queries with embedded MariaDB) \"just works\" by replacing the array names `taxi_dense_array` and `taxi_sparse_array` with a URI that points to another backend, e.g., `s3://<my_bucket>/<path>/array_name`. The TileDB data format is **cloud-native** (based on immutable objects for fast updates and time traveling, to be covered in later tutorials) and the storage engine takes it to the extreme to implement **parallel IO** while **minimizing the communication** with the backend wherever possible.",
"_____no_output_____"
],
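[
"As a hedged sketch (the bucket name and region below are placeholders, and AWS credentials are assumed to be available, e.g., via environment variables or additional `vfs.s3.*` config parameters):\n\n```python\ncfg = tiledb.Config({\"vfs.s3.region\": \"us-east-1\"})\nctx = tiledb.Ctx(cfg)\n\n# Open an array stored on S3 exactly as if it were local\nA = tiledb.open(\"s3://my_bucket/taxi_sparse_array\", ctx=ctx)\ndf = A.df[np.datetime64(\"2020-01-01\"):np.datetime64(\"2020-01-15\"), 5.5:12.5]\nA.close()\n```",
"_____no_output_____"
],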
[
"## Simple VFS Operations",
"_____no_output_____"
],
[
"In order to be able to support numerous storage backends, we abstracted all IO (e.g., read, write, remove, move, list, etc.) behind a Virtual Filesystem (VFS) class, which [we exposed in our APIs](https://docs.tiledb.com/main/solutions/tiledb-embedded/api-usage/virtual-filesystem) as it is useful beyond the array internals. Everything we describe below in this section \"just works\" for any other storage backend URI.",
"_____no_output_____"
],
[
"For example, you can use the VFS functionality to list the contents of an array folder:",
"_____no_output_____"
]
],
[
[
"vfs = tiledb.VFS()",
"_____no_output_____"
],
[
"vfs.ls(\"taxi_sparse_array\")",
"_____no_output_____"
]
],
[
[
"Or remove the arrays we created.",
"_____no_output_____"
]
],
[
[
"vfs.remove_dir(\"taxi_dense_array\")",
"_____no_output_____"
],
[
"vfs.remove_dir(\"taxi_sparse_array\")",
"_____no_output_____"
],
[
"vfs.remove_dir(\"sliced_taxi_sparse_array\")",
"_____no_output_____"
]
],
[
[
"Also you can remove the CSV file as follows.",
"_____no_output_____"
]
],
[
[
"vfs.remove_file('yellow_tripdata_2020-01.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e752851065287113f8c2afd476f8d815c168f88b | 259,399 | ipynb | Jupyter Notebook | Computing Bids/Computing Optimal Bids.ipynb | TXhadb/2021-May-Data-Science-Project | 9165c0d6452341bc7e28abe1d2f3deac21f088d3 | [
"MIT"
] | 1 | 2022-02-07T04:39:31.000Z | 2022-02-07T04:39:31.000Z | Computing Bids/Computing Optimal Bids.ipynb | gedwards09/Root-it | 48ec324c942b0cb0793f55e1c9ee63ba9e8c5e67 | [
"MIT"
] | null | null | null | Computing Bids/Computing Optimal Bids.ipynb | gedwards09/Root-it | 48ec324c942b0cb0793f55e1c9ee63ba9e8c5e67 | [
"MIT"
] | null | null | null | 96.754569 | 40,928 | 0.710616 | [
[
[
"import math\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.optimize import minimize\nfrom scipy.optimize import LinearConstraint, NonlinearConstraint\n\ndf = pd.read_csv(\"../Root_Insurance_data.csv\")\n#copying and pasting numbers without needing to run code again\n\n#_name is the 4 features\n_name = ['Currently Insured', 'Number of Vehicles', 'Number of Drivers', 'Marital Status']\n#_index keeps track of stuff\n#(currently insured, number vehicles, number drivers, marital status)\n_index = [['Y', 1, 1, 'M'],\n ['Y', 2, 2, 'M'],\n ['Y', 3, 1, 'S'],\n ['Y', 1, 2, 'S'],\n ['Y', 2, 1, 'M'],\n ['Y', 3, 2, 'M'],\n ['Y', 1, 1, 'S'],\n ['Y', 2, 2, 'S'],\n ['Y', 3, 1, 'M'],\n ['Y', 1, 2, 'M'],\n ['Y', 2, 1, 'S'],\n ['Y', 3, 2, 'S'],\n ['N', 1, 1, 'M'],\n ['N', 2, 2, 'M'],\n ['N', 3, 1, 'S'],\n ['N', 1, 2, 'S'],\n ['N', 2, 1, 'M'],\n ['N', 3, 2, 'M'],\n ['N', 1, 1, 'S'],\n ['N', 2, 2, 'S'],\n ['N', 3, 1, 'M'],\n ['N', 1, 2, 'M'],\n ['N', 2, 1, 'S'],\n ['N', 3, 2, 'S'],\n ['unknown', 1, 1, 'M'],\n ['unknown', 2, 2, 'M'],\n ['unknown', 3, 1, 'S'],\n ['unknown', 1, 2, 'S'],\n ['unknown', 2, 1, 'M'],\n ['unknown', 3, 2, 'M'],\n ['unknown', 1, 1, 'S'],\n ['unknown', 2, 2, 'S'],\n ['unknown', 3, 1, 'M'],\n ['unknown', 1, 2, 'M'],\n ['unknown', 2, 1, 'S'],\n ['unknown', 3, 2, 'S']]\n\n#click = number of clicks for each status\n#sold = number of policies sold\n#total = number in each category\nclick = [37, 21, 24, 77, 19, 18, 78, 16, 21, 76, 15, 11, 25, 33, 22, 79, 15, 21, 43, 32, 15, 74, 16, 13, 112, 94, 67, 128, 84, 73, 127, 110, 69, 105, 108, 0]\nsold = [11, 8, 5, 24, 6, 2, 38, 3, 4, 24, 4, 4, 14, 15, 8, 32, 6, 11, 25, 15, 9, 31, 5, 3, 59, 26, 26, 55, 41, 32, 69, 42, 25, 57, 44, 0]\ntotal = [274, 317, 267, 282, 284, 276, 290, 304, 294, 295, 266, 270, 300, 296, 286, 283, 263, 326, 271, 316, 265, 275, 272, 291, 294, 306, 256, 303, 268, 277, 280, 288, 287, 294, 284, 0]\n\n\n\n#probsold = number sold/10000\n#probSoldGivenClick = numbersold/number click\nprobsold=[]#probably sold\nprobSoldGivenClick = [] #prob sold given click\nfor i in range(0,len(click)):\n if click[i] !=0:\n probsold.append(sold[i]/10000)\n probSoldGivenClick.append(sold[i]/click[i])\n else:\n probsold.append(0)\n probSoldGivenClick.append(0)\n \n#prRF = pd.read_csv('probabilities.csv')\n#probRF = prRF['prob from random forests']\n \n#totalrank, total number in each rank, 1 to 5\n#clickperrank = clicks in each rank, 1 to 5\ntotalrank = [1611, 1608, 2401, 2089, 2291]\nclickperrank = [848, 462, 351, 168, 49]\n\n#dividedrankmean = average rank for each category\ndividedrankmean = [3.5693430656934306, 4.302839116719243, 4.2434456928838955, 2.5106382978723403, 4.264084507042254, 4.3768115942028984, 2.5482758620689654, 4.3256578947368425, 4.316326530612245, 2.559322033898305, 4.349624060150376, 4.325925925925926, 3.5733333333333333, 3.4932432432432434, 4.255244755244755, 2.441696113074205, 4.391634980988593, 4.245398773006135, 3.4354243542435423, 3.632911392405063, 4.305660377358491, 2.4545454545454546, 4.286764705882353, 4.243986254295533, 1.6904761904761905, 1.65359477124183, 2.58984375, 1.683168316831683, 1.7611940298507462, 2.5487364620938626, 1.6214285714285714, 1.7256944444444444, 2.491289198606272, 1.6598639455782314, 1.6866197183098592]\n#WARNING, len(dividedrankmean)= 35. 
This is because the very last category(['unknown', 3, 2, 'S']) is empty\n \n\n \ncompetitionrate = []#the rate that other companies are bidding for each customer type, assume exponential\ncompetitionrateuni = []#assume uniform\ntheta = [] # the rate of expected clicks for ads at rank=r.\nbid = [] # what we bid for each type of customer\n\n#fill in values\nfor i in range(0,35):\n competitionrate.append(0) #default 0\n competitionrateuni.append(0) #default 0\n bid.append(10) #default 10\n#estimated values for theta\nfor i in range(0,5):\n theta.append(clickperrank[i]/totalrank[i])\n\n#estimated values for competitionrate. Assume exponential MLE.\nfor i in range(0, 35):\n competitionrate[i] = -10/(np.log((dividedrankmean[i]-1)/4))\nfor i in range(0, 35):\n competitionrateuni[i] = 40/(5-dividedrankmean[i])\n",
"_____no_output_____"
]
],
[
[
"Here, we define the functions, Expected cost, expected number of policies sold, and the expected cost per policy sold. I assume the model for bids is exponential.",
"_____no_output_____"
]
],
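[
[
"To make the model in the next cell explicit (this is a reading of the implementation, not an independent derivation): for customer type $i$, let $N_i$ be the number of impressions (`total[i]`), $b_i$ our bid, and assume each of the 4 competitors bids i.i.d. exponentially with mean $\\lambda_i$ (`competitionrate[i]`), so a single competitor outbids us with probability $q_i = e^{-b_i/\\lambda_i}$. Landing at rank $r$ means $r-1$ competitors bid higher, which happens with probability $\\binom{4}{r-1} q_i^{r-1} (1-q_i)^{5-r}$. With $\\theta_r$ the click rate at rank $r$ (`theta[r-1]`) and $s_i$ the sale probability given a click (`probSoldGivenClick[i]`),\n\n$$\\mathbb{E}[\\mathrm{cost}] = \\sum_i \\sum_{r=1}^{5} N_i b_i \\theta_r \\binom{4}{r-1} q_i^{r-1} (1-q_i)^{5-r}, \\qquad \\mathbb{E}[\\mathrm{sold}] = \\sum_i \\sum_{r=1}^{5} N_i s_i \\theta_r \\binom{4}{r-1} q_i^{r-1} (1-q_i)^{5-r}.$$",
"_____no_output_____"
]
],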
[
[
"#expectedcost, expectedsold, expected cost per policy sold\ndef expectedsold(bid):#expected number of policies sold\n for i in range(0, len(bid)):\n if bid[i]<0:\n return 0\n e = 0\n for i in range(0, 35):#35 because click at 36 = 0, lambda will cause errors\n for r in range(1, 6):\n if bid[i]<0:\n e = e +0\n else:\n e = e + total[i]*probSoldGivenClick[i]*theta[r-1]*math.comb(4, r-1)*(np.exp(-bid[i]/competitionrate[i]))**(r-1)*(1-np.exp(-bid[i]/competitionrate[i]))**(5-r)\n #print(e)\n return e\ndef expectedcost(bid):#expected cost of policy\n e = 0\n for i in range(0, 35):\n for r in range(1, 6):\n if bid[i]<0:\n e = e+0\n else:\n e = e + total[i]*bid[i]*theta[r-1]*math.comb(4, r-1)*(np.exp(-bid[i]/competitionrate[i]))**(r-1)*(1-np.exp(-bid[i]/competitionrate[i]))**(5-r)\n #print(e)\n return e\n\ndef costpersold(bid):\n return expectedcost(bid)/expectedsold(bid)\ndef constraint(bid): #constaining that the expected number of policies sold is more than 400\n return expectedsold(bid)\n",
"_____no_output_____"
]
],
[
[
"Some additional functions",
"_____no_output_____"
]
],
[
[
"#expected clicks\n\ndef expectedclick(bid):#expected clicks\n e = 0\n for i in range(0, 35):\n for r in range(1, 6):\n if bid[i]<0:\n e = e+0\n else:\n e = e + click[i]*theta[r-1]*math.comb(4, r-1)*(np.exp(-bid[i]/competitionrate[i]))**(r-1)*(1-np.exp(-bid[i]/competitionrate[i]))**(5-r)\n #print(e)\n return e",
"_____no_output_____"
]
],
[
[
"I compute the gradient of expectedcost",
"_____no_output_____"
]
],
[
[
"#expected cost derivative\ndef grad_cost(bid):\n gradient = []\n for i in range(0,35):\n k = 0\n for r in range(1,6):\n if bid[i]<0:\n k = k+0\n else:\n k = k + click[i]*theta[r-1]*math.comb(4, r-1)*((np.exp(-bid[i]/competitionrate[i]))**(r-1)*(1-np.exp(-bid[i]/competitionrate[i]))**(5-r) + bid[i]*(np.exp(-bid[i]/competitionrate[i]))**(r-2)*(r-1)*(-np.exp(-bid[i]/competitionrate[i])/competitionrate[i])*(1-np.exp(-bid[i]/competitionrate[i]))**(5-r) +bid[i]*(1-np.exp(-bid[i]/competitionrate[i]))**(4-r)*(5-r)*(np.exp(-bid[i]/competitionrate[i])/competitionrate[i])*(np.exp(-bid[i]/competitionrate[i]))**(r-1)) \n gradient.append(k)\n np.array(gradient)\n return gradient",
"_____no_output_____"
]
],
[
[
"Here we optimize costpersold()",
"_____no_output_____"
]
],
[
[
"#hessian is zero matrix if needed\ndef cons_H(x, v):\n return np.zeros((35,35))\n#nonlinar constraint is the constraint function is bounded from 400 to 1000\nnonlinear_constraint = NonlinearConstraint(constraint, 400, 1000)#, hess=cons_H)\n#linear constraint is each bid is between 1 to 50\nlincon = LinearConstraint(np.identity(35), np.linspace(0.01, 0.01, num=35), np.linspace(50000, 50000, num=35))",
"_____no_output_____"
],
[
"nonlin =[]\nfor i in range(0,10):\n nonlin.append(NonlinearConstraint(constraint, 100+i*100, 100+i*100, hess=cons_H))\n \nres = []\nprint(datetime.datetime.now())\nfor i in range(0,10):\n res.append(minimize(expectedcost, competitionrate, method='trust-constr', constraints=[lincon, nonlin[i]], options={'maxiter':5000})) \n print(datetime.datetime.now())\nfor i in range(0,10):\n print(expectedcost(res[i].x), expectedsold(res[i].x), costpersold(res[i].x))",
"2021-05-26 03:13:05.601465\n"
],
[
"print(expectedcost(res[3].x), expectedsold(res[3].x), costpersold(res[3].x))\nfor i in range(0,len(res)):\n print(res[i].execution_time, res[i].niter, res[i].success)\n print(expectedcost(res[i].x), expectedsold(res[i].x), costpersold(res[i].x))",
"3761.859313508475 399.9999999999999 9.404648283771191\n22.76661467552185 157 True\n33.96841100112909 99.99999999999997 0.339684110011291\n45.92473888397217 269 True\n644.9672914434103 199.99999999999997 3.224836457217052\n61.344897985458374 345 True\n1889.1481206645906 300.0 6.297160402215302\n51.99087572097778 345 True\n3761.859313508475 399.9999999999999 9.404648283771191\n225.4765899181366 1499 True\n6311.57098869516 499.9999999999997 12.623141977390327\n373.01617193222046 2328 True\n9623.238740317436 600.0 16.038731233862393\n670.3707628250122 5001 False\n13816.51853809546 699.999999933377 19.737883627729225\n958.8748323917389 5001 False\n19098.125543193484 800.0000000000127 23.872656928991475\n950.2957923412323 5001 False\n25534.99959141661 899.9999999999989 28.372221768240713\n948.0617001056671 5001 False\n36090.91203633987 1000.0000000000002 36.09091203633986\n"
]
],
[
[
"Going to run gradient descent on expectedcost",
"_____no_output_____"
]
],
[
[
"#code modified from https://stackabuse.com/gradient-descent-in-python-implementation-and-theory/\n\n# Make threshold a -ve value if you want to run exactly\n# max_iterations.\ndef gradient_descent(max_iterations,threshold,w_init,\n obj_func,grad_func,\n learning_rate=0.05,momentum=0.8):\n \n w = w_init\n w_history = w\n f_history = obj_func(w)\n delta_w = np.zeros(w.shape)\n i = 0\n diff = 1.0e10\n \n while i<max_iterations and diff>threshold:\n for i in range(0,len(delta_w)-1):\n delta_w[i] = -learning_rate*grad_func(w)[i] + momentum*delta_w[i]\n w = w+delta_w\n \n # store the history of w and f\n w_history = np.vstack((w_history,w))\n f_history = np.vstack((f_history,obj_func(w)))\n \n # update iteration number and diff between successive values\n # of objective function\n i+=1\n diff = np.absolute(f_history[-1]-f_history[-2])\n \n return w_history,f_history\n\nresgrad = gradient_descent(10000, 0.05, np.array(bid), expectedcost, grad_cost)",
"_____no_output_____"
],
[
"resgrad",
"_____no_output_____"
],
[
"x = [100+i*100 for i in range(0,9)]\ny = [expectedcost(res[i].x) for i in range(0,9)]\n\nplt.plot(x,y)\nplt.show()",
"_____no_output_____"
],
[
"x = [100+i*100 for i in range(0,9)]\ny = [costpersold(res[i].x) for i in range(0,9)]\n\nplt.figure(figsize = (16,12))\nplt.plot(x,y)\nplt.xlabel(\"Number of Policies Sold\", fontsize = 16)\nplt.ylabel(\"Cost per Policy Sold\", fontsize = 16)\nplt.title(\"Cost per Policy Sold\", fontsize = 20)\n\nplt.show()",
"_____no_output_____"
],
[
"currentinsured, numcars, numdrivers, married = [], [],[],[]\n\nfor i in range(0, len(_index)):\n currentinsured.append(_index[i][0])\n numcars.append(_index[i][1])\n numdrivers.append(_index[i][2])\n married.append(_index[i][3])\n\noptimizedbids = pd.DataFrame({\"Currently Insured\":currentinsured[:-1],\n \"Number of Vehicles\":numcars[:-1],\n 'Number of Drivers':numdrivers[:-1],\n 'Marital Status':married[:-1],\n 'Estimated Competition Average':competitionrate,\n 'Bids to get 100 Sales':res[0].x,\n 'Bids to get 200 Sales':res[1].x,\n 'Bids to get 300 Sales':res[2].x,\n 'Bids to get 400 Sales':res[3].x,\n 'Bids to get 500 Sales':res[4].x,\n 'Bids to get 600 Sales':res[5].x,\n 'Bids to get 700 Sales':res[6].x,\n 'Bids to get 800 Sales':res[7].x,\n 'Bids to get 900 Sales':res[8].x,\n 'Bids to get 1000 Sales':res[9].x,})\n",
"_____no_output_____"
],
[
"optimizedbids",
"_____no_output_____"
],
[
"optimizedbids.to_csv(\"optimizedbidsexp.csv\", index=False)",
"_____no_output_____"
]
],
[
[
"Now I find the optimized bids assuming the model is uniformly distributed. First, the functions.",
"_____no_output_____"
]
],
[
[
"def expectedsolduni(bid):#expected number of policies sold\n for i in range(0, len(bid)):\n if bid[i]<0:\n return 0\n e = 0\n for i in range(0, 35):#35 because click at 36 = 0, lambda will cause errors\n for r in range(1, 6):\n if bid[i]<0:\n e = e +0\n else:\n e = e + total[i]*probSoldGivenClick[i]*theta[r-1]*math.comb(4, r-1)*(1- bid[i]/competitionrateuni[i])**(r-1)*(bid[i]/competitionrateuni[i])**(5-r)\n #print(e)\n return e\n\ndef expectedcostuni(bid):#expected cost of policy\n e = 0\n for i in range(0, 35):\n for r in range(1, 6):\n if bid[i]<0:\n e = e+0\n else:\n e = e + total[i]*bid[i]*theta[r-1]*math.comb(4, r-1)*(1- bid[i]/competitionrateuni[i])**(r-1)*(bid[i]/competitionrateuni[i])**(5-r)\n #print(e)\n return e\n\ndef constraintuni(bid): #constaining that the expected number of policies sold is more than 400\n for i in range(0, len(bid)):\n if bid[i]<0:\n return 0\n e = 0\n for i in range(0, 35):#35 because click at 36 = 0, lambda will cause errors\n for r in range(1, 6):\n if bid[i]<0:\n e = e +0\n else:\n e = e + total[i]*probSoldGivenClick[i]*theta[r-1]*math.comb(4,r-1)*(1- bid[i]/competitionrateuni[i])**(r-1)*(bid[i]/competitionrateuni[i])**(5-r)\n #print(e)\n return e",
"_____no_output_____"
],
[
"nonlinuni =[]\nfor i in range(0,10):\n nonlinuni.append(NonlinearConstraint(constraintuni, 100+i*100, 100+i*100, hess=cons_H))",
"_____no_output_____"
],
[
"result_uni = []\nprint(datetime.datetime.now())\nfor i in range(0,10):\n result_uni.append(minimize(expectedcostuni, competitionrateuni, method='trust-constr', constraints=[lincon, nonlinuni[i]],))\n print(datetime.datetime.now())",
"2021-05-26 04:25:00.824079\n2021-05-26 04:25:14.708865\n2021-05-26 04:25:31.807700\n2021-05-26 04:26:34.431609\n2021-05-26 04:27:18.165579\n2021-05-26 04:28:22.111770\n2021-05-26 04:29:27.507528\n2021-05-26 04:29:36.699926\n2021-05-26 04:29:53.672226\n2021-05-26 04:30:14.422086\n2021-05-26 04:30:35.451769\n"
],
[
"for i in range(0,10):\n print(expectedcostuni(result_uni[i].x), expectedsolduni(result_uni[i].x), expectedcostuni(result_uni[i].x)/expectedsolduni(result_uni[i].x))",
"65.46589663386851 99.99999999999 0.6546589663387506\n1048.4176558924369 200.00000000000003 5.242088279462184\n2774.5359044475854 300.00000000001467 9.248453014824833\n5032.491978185759 400.00000000000006 12.581229945464395\n7710.440709791023 500.00000000001756 15.420881419581505\n10740.759894809737 600.0000000000242 17.90126649134884\n14078.222476735686 699.9999999999903 20.111746395336972\n17690.47263618005 799.9999999999997 22.11309079522507\n21553.030423436074 900.0 23.947811581595637\n25646.671471003116 1000.0000000000001 25.646671471003113\n"
],
[
"bidsuni = pd.DataFrame({\"Currently Insured\":currentinsured[:-1],\n \"Number of Vehicles\":numcars[:-1],\n 'Number of Drivers':numdrivers[:-1],\n 'Marital Status':married[:-1],\n 'Estimated Competition Average':competitionrateuni,\n 'Bids to get 100 Sales':result_uni[0].x,\n 'Bids to get 200 Sales':result_uni[1].x,\n 'Bids to get 300 Sales':result_uni[2].x,\n 'Bids to get 400 Sales':result_uni[3].x,\n 'Bids to get 500 Sales':result_uni[4].x,\n 'Bids to get 600 Sales':result_uni[5].x,\n 'Bids to get 700 Sales':result_uni[6].x,\n 'Bids to get 800 Sales':result_uni[7].x,\n 'Bids to get 900 Sales':result_uni[8].x,\n 'Bids to get 1000 Sales':result_uni[9].x,})",
"_____no_output_____"
],
[
"bidsuni.to_csv('optimizedbidsuni.csv')",
"_____no_output_____"
],
[
"x = bidsuni['Estimated Competition Average']\ny = bidsuni['Bids to get 400 Sales']\n\nplt.scatter(x,y)\nplt.show()",
"_____no_output_____"
],
[
"bidsuni",
"_____no_output_____"
],
[
"x = optimizedbids['Estimated Competition Average']\ny1 = optimizedbids['Bids to get 400 Sales']\ny2 = optimizedbids['Bids to get 500 Sales']\ny3 = optimizedbids['Bids to get 600 Sales']\ny4 = optimizedbids['Bids to get 700 Sales']\ny5 = optimizedbids['Bids to get 800 Sales']\ny6 = optimizedbids['Bids to get 900 Sales']\n\nfig, axs = plt.subplots(1, 6, figsize = (60,6), sharey=True)\n\naxs[0].scatter(x, y1)\naxs[1].scatter(x, y2)\naxs[2].scatter(x, y3)\naxs[3].scatter(x, y4)\naxs[4].scatter(x, y5)\naxs[5].scatter(x, y6)\n\nplt.show()",
"_____no_output_____"
],
[
"x = bidsuni['Estimated Competition Average']\ny1 = bidsuni['Bids to get 400 Sales']\ny2 = bidsuni['Bids to get 500 Sales']\ny3 = bidsuni['Bids to get 600 Sales']\ny4 = bidsuni['Bids to get 700 Sales']\ny5 = bidsuni['Bids to get 800 Sales']\ny6 = bidsuni['Bids to get 900 Sales']\ny7 = bidsuni['Bids to get 1000 Sales']\n\nfig, axs = plt.subplots(1, 7, figsize = (60,6), sharey=True)\n\naxs[0].scatter(x, y1)\naxs[1].scatter(x, y2)\naxs[2].scatter(x, y3)\naxs[3].scatter(x, y4)\naxs[4].scatter(x, y5)\naxs[5].scatter(x, y6)\naxs[6].scatter(x, y7)\n\nplt.show()",
"_____no_output_____"
],
[
"x = [100+i*100 for i in range(0,10)]\ny = [expectedcostuni(result_uni[i].x)/expectedsolduni(result_uni[i].x) for i in range(0,10)]\n\nplt.figure(figsize = (10,8))\nplt.plot(x,y)\nplt.xlabel(\"Number of Policies Sold\", fontsize = 16)\nplt.ylabel(\"Cost per Policy Sold\", fontsize = 16)\nplt.title(\"Cost per Policy Sold\", fontsize = 20)\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7529e7a4f92d6e1e50149996685d1cb5eb77274 | 27,962 | ipynb | Jupyter Notebook | analysis/004_Plot_network_training.ipynb | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | 1 | 2022-02-10T07:26:09.000Z | 2022-02-10T07:26:09.000Z | analysis/004_Plot_network_training.ipynb | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | 1 | 2022-02-11T06:55:29.000Z | 2022-02-12T22:26:44.000Z | analysis/004_Plot_network_training.ipynb | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | null | null | null | 109.654902 | 20,520 | 0.852514 | [
[
[
"# plot the training performance!\n\nimport time\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\n\nimport sys, os, pickle\nimport h5py\nimport cv2\nfrom colour import Color\nimport glob, pathlib\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# Load the loss function across training",
"_____no_output_____"
]
],
[
[
"# you will find the training runs in ./Runs/\n# these training traces are just csv's exported from tensorboard, \n# They are exported to make a figrue for the manuscript -- you can just look at the runs in tensorboard\n\nrun_csv_folder = 'example_data/training_traces/'\nfiles = os.listdir(run_csv_folder)\nfiles_txt = [i for i in files if i.endswith('.csv')]\nfiles_txt.sort()\nfor f in files_txt:\n print(f)",
"run-Nov22_11-19-19_CE-01-tag-lr(1).csv\nrun-Nov22_11-19-19_CE-01-tag-trn_frame_loss.csv\nrun-Nov22_11-19-19_CE-01-tag-trn_loss.csv\nrun-Nov22_11-19-19_CE-01-tag-val_frame_loss.csv\nrun-Nov22_11-19-19_CE-01-tag-val_loss.csv\n"
],
[
"val_frame_loss = pd.read_csv(glob.glob(run_csv_folder+'*val_frame_loss*')[0])\nval_loss = pd.read_csv(glob.glob(run_csv_folder+'*val_loss*')[0])\ntrn_frame_loss = pd.read_csv(glob.glob(run_csv_folder+'*trn_frame_loss*')[0])\ntrn_loss = pd.read_csv(glob.glob(run_csv_folder+'*trn_loss*')[0])\nprint(val_frame_loss)\n",
" Wall time Step Value\n0 1.574440e+09 0 5138.962891\n1 1.574440e+09 1 4729.284668\n2 1.574440e+09 2 4470.230469\n3 1.574440e+09 3 5048.220703\n4 1.574440e+09 4 5750.927246\n.. ... ... ...\n665 1.574443e+09 133000 894.225952\n666 1.574443e+09 133001 1448.598633\n667 1.574443e+09 133002 1281.904907\n668 1.574443e+09 133003 1145.183350\n669 1.574443e+09 133004 1027.295776\n\n[670 rows x 3 columns]\n"
]
],
[
[
"# Plot the training and validation loss",
"_____no_output_____"
]
],
[
[
"import matplotlib\n\n# Say, \"the default sans-serif font is COMIC SANS\"\nmatplotlib.rcParams['font.sans-serif'] = \"Liberation Sans\"\n# Then, \"ALWAYS use sans-serif fonts\"\nmatplotlib.rcParams['font.family'] = \"sans-serif\"\n\nmatplotlib.rc('font', family='sans-serif') \nmatplotlib.rc('text', usetex='false') \nmatplotlib.rcParams.update({'font.size': 13})",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(2, gridspec_kw={'height_ratios': [3, 1]}, sharex=False,figsize=(3.5,3))\nfrom palettable.cmocean.sequential import Algae_6\ncmpl = Algae_6.mpl_colors\n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 10)) # outward by 10 points\n spine.set_smart_bounds(True)\n else:\n spine.set_color('none') # don't draw spine\n\n # turn off ticks where there is no spine\n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n # no yaxis ticks\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n # no xaxis ticks\n ax.xaxis.set_ticks([])\n\n\ndef moving_average(data_set, periods=3):\n weights = np.ones(periods) / periods\n return np.convolve(data_set, weights, mode='same').ravel()\n\ntotal_time = (trn_frame_loss['Wall time'].max()-trn_frame_loss['Wall time'].min())/60\ntime_stretch = total_time/len(trn_frame_loss['Value'])\n\n\n# plt.subplot(4,1,(1,2,3) )\nloss = trn_frame_loss['Value']\nloss_ = loss.copy()\ntime = np.arange(len(loss)) * time_stretch\ntime_ = time.copy() \n# ax1.plot(time_,loss_,'.',c=cmpl[1],label=\"training batch\",alpha = 1,markersize=4)\n\nloss = val_frame_loss['Value']\nscaling = len(loss_)/len(loss)\ntime = np.arange(len(loss))*scaling * time_stretch\nax1.plot(time,loss,'.',c=cmpl[4],label=\"validation batch\",alpha = 1,markersize=4)\nax1.plot(time_,loss_,'.',c=cmpl[1],label=\"training batch\",alpha = .5,markersize=4)\n\nfrom scipy.signal import savgol_filter\n\nax = plt.gca()\nax1.spines['top'].set_visible(False)\nax1.spines['right'].set_visible(False)\nax1.spines['bottom'].set_visible(False)\nax2.spines['top'].set_visible(False)\nax2.spines['right'].set_visible(False)\n\n\n# ax1.legend(loc='upper right')\n\nax1.set_ylim(300,10000)\n\n\nax1.set_yscale('log')\nax2.set_xlabel(\"Time [min]\")\nax2.set_ylabel(\"Learning rate\")\nax1.set_ylabel(\"Loss\")\n\ntrn_lr = pd.read_csv(glob.glob(run_csv_folder+'*tag-lr*')[0])\ntime = np.arange(len(trn_lr))/len(trn_lr)*total_time\nax2.plot(time,trn_lr['Value'],c=cmpl[2])\nprint(len(trn_lr))\n\nax2.set_yscale('log')\n\n# plt.xlim(0,45)\nadjust_spines(ax1, ['left'])\nadjust_spines(ax2, ['left', 'bottom'])\n\nax2.set_xticks([0,15,30,45,60])\nax2.set_yticks([1e-8,1e-3])\n\nplt.show()\n",
"134\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e752a8518df2b17212288c33b27370195af7a41d | 99,170 | ipynb | Jupyter Notebook | rapids/rapids-cuDF-cuml-01.ipynb | martin-fabbri/colab-notebooks | 03658a7772fbe71612e584bbc767009f78246b6b | [
"Apache-2.0"
] | 8 | 2020-01-18T18:39:49.000Z | 2022-02-17T19:32:26.000Z | rapids/rapids-cuDF-cuml-01.ipynb | martin-fabbri/colab-notebooks | 03658a7772fbe71612e584bbc767009f78246b6b | [
"Apache-2.0"
] | null | null | null | rapids/rapids-cuDF-cuml-01.ipynb | martin-fabbri/colab-notebooks | 03658a7772fbe71612e584bbc767009f78246b6b | [
"Apache-2.0"
] | 6 | 2020-01-18T18:40:02.000Z | 2020-09-27T09:26:38.000Z | 118.341289 | 22,970 | 0.755601 | [
[
[
"<a href=\"https://colab.research.google.com/github/martin-fabbri/colab-notebooks/blob/master/rapids-cuDF-cuml-01.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Introduction to cuML",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"Wed Sep 11 19:48:12 2019 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 430.40 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 42C P8 9W / 70W | 0MiB / 15079MiB | 0% Default |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: GPU Memory |\n| GPU PID Type Process name Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"!wget -nc https://github.com/rapidsai/notebooks-extended/raw/master/utils/rapids-colab.sh\n!bash rapids-colab.sh\n\nimport sys, os\n\nsys.path.append('/usr/local/lib/python3.6/site-packages/')\nos.environ['NUMBAPRO_NVVM'] = '/usr/local/cuda/nvvm/lib64/libnvvm.so'\nos.environ['NUMBAPRO_LIBDEVICE'] = '/usr/local/cuda/nvvm/libdevice/'",
"--2019-09-11 19:48:17-- https://github.com/rapidsai/notebooks-extended/raw/master/utils/rapids-colab.sh\nResolving github.com (github.com)... 140.82.118.3\nConnecting to github.com (github.com)|140.82.118.3|:443... connected.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: https://github.com/rapidsai/notebooks-contrib/raw/master/utils/rapids-colab.sh [following]\n--2019-09-11 19:48:17-- https://github.com/rapidsai/notebooks-contrib/raw/master/utils/rapids-colab.sh\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/rapidsai/notebooks-contrib/master/utils/rapids-colab.sh [following]\n--2019-09-11 19:48:17-- https://raw.githubusercontent.com/rapidsai/notebooks-contrib/master/utils/rapids-colab.sh\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1609 (1.6K) [text/plain]\nSaving to: ‘rapids-colab.sh’\n\n\rrapids-colab.sh 0%[ ] 0 --.-KB/s \rrapids-colab.sh 100%[===================>] 1.57K --.-KB/s in 0s \n\n2019-09-11 19:48:17 (302 MB/s) - ‘rapids-colab.sh’ saved [1609/1609]\n\n--2019-09-11 19:48:19-- https://github.com/rapidsai/notebooks-extended/raw/master/utils/env-check.py\nResolving github.com (github.com)... 140.82.118.3\nConnecting to github.com (github.com)|140.82.118.3|:443... connected.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: https://github.com/rapidsai/notebooks-contrib/raw/master/utils/env-check.py [following]\n--2019-09-11 19:48:19-- https://github.com/rapidsai/notebooks-contrib/raw/master/utils/env-check.py\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/rapidsai/notebooks-contrib/master/utils/env-check.py [following]\n--2019-09-11 19:48:19-- https://raw.githubusercontent.com/rapidsai/notebooks-contrib/master/utils/env-check.py\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 783 [text/plain]\nSaving to: ‘env-check.py’\n\nenv-check.py 100%[===================>] 783 --.-KB/s in 0s \n\n2019-09-11 19:48:19 (127 MB/s) - ‘env-check.py’ saved [783/783]\n\nChecking for GPU type:\n*********************************************\nWoo! Your instance has the right kind of GPU!\n*********************************************\n\nRemoving conflicting packages, will replace with RAPIDS compatible versions\nUninstalling xgboost-0.90:\n Successfully uninstalled xgboost-0.90\nUninstalling dask-1.1.5:\n Successfully uninstalled dask-1.1.5\nUninstalling distributed-1.25.3:\n Successfully uninstalled distributed-1.25.3\nInstalling conda\n--2019-09-11 19:48:23-- https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh\nResolving repo.continuum.io (repo.continuum.io)... 104.18.200.79, 104.18.201.79, 2606:4700::6812:c84f, ...\nConnecting to repo.continuum.io (repo.continuum.io)|104.18.200.79|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 58468498 (56M) [application/x-sh]\nSaving to: ‘Miniconda3-4.5.4-Linux-x86_64.sh’\n\nMiniconda3-4.5.4-Li 100%[===================>] 55.76M 106MB/s in 0.5s \n\n2019-09-11 19:48:24 (106 MB/s) - ‘Miniconda3-4.5.4-Linux-x86_64.sh’ saved [58468498/58468498]\n\nPREFIX=/usr/local\ninstalling: python-3.6.5-hc3d631a_2 ...\nPython 3.6.5 :: Anaconda, Inc.\ninstalling: ca-certificates-2018.03.07-0 ...\ninstalling: conda-env-2.6.0-h36134e3_1 ...\ninstalling: libgcc-ng-7.2.0-hdf63c60_3 ...\ninstalling: libstdcxx-ng-7.2.0-hdf63c60_3 ...\ninstalling: libffi-3.2.1-hd88cf55_4 ...\ninstalling: ncurses-6.1-hf484d3e_0 ...\ninstalling: openssl-1.0.2o-h20670df_0 ...\ninstalling: tk-8.6.7-hc745277_3 ...\ninstalling: xz-5.2.4-h14c3975_4 ...\ninstalling: yaml-0.1.7-had09818_2 ...\ninstalling: zlib-1.2.11-ha838bed_2 ...\ninstalling: libedit-3.1.20170329-h6b74fdf_2 ...\ninstalling: readline-7.0-ha6073c6_4 ...\ninstalling: sqlite-3.23.1-he433501_0 ...\ninstalling: asn1crypto-0.24.0-py36_0 ...\ninstalling: certifi-2018.4.16-py36_0 ...\ninstalling: chardet-3.0.4-py36h0f667ec_1 ...\ninstalling: idna-2.6-py36h82fb2a8_1 ...\ninstalling: pycosat-0.6.3-py36h0a5515d_0 ...\ninstalling: pycparser-2.18-py36hf9f622e_1 ...\ninstalling: pysocks-1.6.8-py36_0 ...\ninstalling: ruamel_yaml-0.15.37-py36h14c3975_2 ...\ninstalling: six-1.11.0-py36h372c433_1 ...\ninstalling: cffi-1.11.5-py36h9745a5d_0 ...\ninstalling: setuptools-39.2.0-py36_0 ...\ninstalling: cryptography-2.2.2-py36h14c3975_0 ...\ninstalling: wheel-0.31.1-py36_0 ...\ninstalling: pip-10.0.1-py36_0 ...\ninstalling: pyopenssl-18.0.0-py36_0 ...\ninstalling: urllib3-1.22-py36hbe7ace6_0 ...\ninstalling: requests-2.18.4-py36he2e5f8d_1 ...\ninstalling: conda-4.5.4-py36_0 ...\ninstallation finished.\nWARNING:\n You currently have a PYTHONPATH environment variable set. This may cause\n unexpected behavior when running the Python interpreter in Miniconda3.\n For best results, please verify that your PYTHONPATH only points to\n directories of packages that are compatible with the Python interpreter\n in Miniconda3: /usr/local\nInstalling RAPIDS 0.10 packages\nPlease standby, this will take a few minutes...\n\n\n==> WARNING: A newer version of conda exists. <==\n current version: 4.5.4\n latest version: 4.7.11\n\nPlease update conda by running\n\n $ conda update -n base conda\n\n\ncffi-1.12.3 | 218 KB | : 100% 1.0/1 [00:00<00:00, 13.00it/s]\nopenssl-1.1.1c | 2.1 MB | : 100% 1.0/1 [00:00<00:00, 3.06it/s] \ncudatoolkit-10.0.130 | 380.0 MB | : 100% 1.0/1 [00:44<00:00, 171.58s/it] \nnccl-2.4.6.1 | 66.6 MB | : 100% 1.0/1 [00:11<00:00, 11.52s/it] \ncudf-0.10.0a | 4.8 MB | : 100% 1.0/1 [00:02<00:00, 2.39s/it] \ncython-0.29.13 | 2.2 MB | : 100% 1.0/1 [00:00<00:00, 1.91it/s] \nscipy-1.3.1 | 18.1 MB | : 100% 1.0/1 [00:03<00:00, 3.13s/it] \njinja2-2.10.1 | 91 KB | : 100% 1.0/1 [00:00<00:00, 13.54it/s]\nfreetype-2.10.0 | 884 KB | : 100% 1.0/1 [00:00<00:00, 5.18it/s] \nmsgpack-python-0.6.1 | 89 KB | : 100% 1.0/1 [00:00<00:00, 21.47it/s]\nc-ares-1.15.0 | 100 KB | : 100% 1.0/1 [00:00<00:00, 2.34it/s] \ndouble-conversion-3. 
| 85 KB | : 100% 1.0/1 [00:00<00:00, 18.99it/s]\ncytoolz-0.10.0 | 429 KB | : 100% 1.0/1 [00:00<00:00, 9.86it/s]\nlibevent-2.1.10 | 1.3 MB | : 100% 1.0/1 [00:00<00:00, 2.90it/s] \npyarrow-0.14.1 | 2.8 MB | : 100% 1.0/1 [00:00<00:00, 1.37it/s] \nlibcuml-0.10.0a | 29.3 MB | : 100% 1.0/1 [00:07<00:00, 7.80s/it] \npynvml-8.0.3 | 30 KB | : 100% 1.0/1 [00:00<00:00, 15.75it/s]\nncurses-6.1 | 1.3 MB | : 100% 1.0/1 [00:00<00:00, 1.25it/s] \nfsspec-0.4.4 | 39 KB | : 100% 1.0/1 [00:00<00:00, 14.74it/s]\ntornado-6.0.3 | 636 KB | : 100% 1.0/1 [00:00<00:00, 5.12it/s] \nbrotli-1.0.7 | 1.0 MB | : 100% 1.0/1 [00:00<00:00, 6.41it/s] \npython-dateutil-2.8. | 219 KB | : 100% 1.0/1 [00:00<00:00, 18.93it/s]\ncachetools-2.1.0 | 10 KB | : 100% 1.0/1 [00:00<00:00, 32.88it/s]\nrsa-3.4.2 | 31 KB | : 100% 1.0/1 [00:00<00:00, 21.24it/s]\nsix-1.12.0 | 22 KB | : 100% 1.0/1 [00:00<00:00, 21.80it/s]\nboost-cpp-1.70.0 | 21.1 MB | : 100% 1.0/1 [00:13<00:00, 13.07s/it] \npillow-6.1.0 | 634 KB | : 100% 1.0/1 [00:00<00:00, 4.06it/s] \nlibcblas-3.8.0 | 10 KB | : 100% 1.0/1 [00:00<00:00, 23.64it/s]\nsnappy-1.1.7 | 39 KB | : 100% 1.0/1 [00:00<00:00, 26.26it/s]\ngrpc-cpp-1.23.0 | 4.5 MB | : 100% 1.0/1 [00:00<00:00, 1.06it/s] \ncuml-0.10.0a | 6.0 MB | : 100% 1.0/1 [00:02<00:00, 2.43s/it] \nclick-7.0 | 61 KB | : 100% 1.0/1 [00:00<00:00, 18.59it/s]\nlz4-c-1.8.3 | 187 KB | : 100% 1.0/1 [00:00<00:00, 3.12it/s] \nca-certificates-2019 | 145 KB | : 100% 1.0/1 [00:00<00:00, 21.29it/s]\nmarkupsafe-1.1.1 | 26 KB | : 100% 1.0/1 [00:00<00:00, 25.11it/s]\npyyaml-5.1.2 | 184 KB | : 100% 1.0/1 [00:00<00:00, 11.67it/s]\nthrift-cpp-0.12.0 | 2.4 MB | : 100% 1.0/1 [00:00<00:00, 2.06it/s] \nlibxgboost-0.90.rapi | 32.8 MB | : 100% 1.0/1 [00:07<00:00, 7.98s/it] \nllvmlite-0.29.0 | 19.9 MB | : 100% 1.0/1 [00:03<00:00, 3.07s/it] \noauthlib-3.0.1 | 82 KB | : 100% 1.0/1 [00:00<00:00, 15.69it/s]\nxgboost-0.90.rapidsd | 11 KB | : 100% 1.0/1 [00:00<00:00, 1.15it/s] \ndask-cuml-0.8.0a | 30 KB | : 100% 1.0/1 [00:00<00:00, 2.68it/s] \ngflags-2.2.2 | 177 KB | : 100% 1.0/1 [00:00<00:00, 13.60it/s]\ncryptography-2.7 | 607 KB | : 100% 1.0/1 [00:00<00:00, 5.10it/s] \nlibcumlprims-0.9.0 | 3.9 MB | : 100% 1.0/1 [00:01<00:00, 1.65s/it] \nidna-2.8 | 132 KB | : 100% 1.0/1 [00:00<00:00, 17.98it/s]\nheapdict-1.0.0 | 7 KB | : 100% 1.0/1 [00:00<00:00, 30.73it/s]\nlibcugraph-0.10.0a | 11.2 MB | : 100% 1.0/1 [00:02<00:00, 2.20s/it] \ncertifi-2019.6.16 | 149 KB | : 100% 1.0/1 [00:00<00:00, 22.41it/s]\narrow-cpp-0.14.1 | 17.3 MB | : 100% 1.0/1 [00:02<00:00, 2.80s/it] \nchardet-3.0.4 | 190 KB | : 100% 1.0/1 [00:00<00:00, 8.50it/s] \nsortedcontainers-2.1 | 25 KB | : 100% 1.0/1 [00:00<00:00, 31.15it/s]\nlibrmm-0.10.0a | 44 KB | : 100% 1.0/1 [00:00<00:00, 3.22it/s] \nlibnvstrings-0.10.0a | 24.7 MB | : 100% 1.0/1 [00:05<00:00, 5.29s/it] \nlibcudf-0.10.0a | 27.8 MB | : 100% 1.0/1 [00:05<00:00, 5.90s/it] \nfastavro-0.22.4 | 405 KB | : 100% 1.0/1 [00:00<00:00, 9.40it/s]\nolefile-0.46 | 31 KB | : 100% 1.0/1 [00:00<00:00, 20.54it/s]\nuriparser-0.9.3 | 49 KB | : 100% 1.0/1 [00:00<00:00, 25.69it/s]\nwheel-0.33.6 | 35 KB | : 100% 1.0/1 [00:00<00:00, 23.46it/s]\nyaml-0.1.7 | 78 KB | : 100% 1.0/1 [00:00<00:00, 17.77it/s]\nliblapack-3.8.0 | 10 KB | : 100% 1.0/1 [00:00<00:00, 24.24it/s]\nsetuptools-41.2.0 | 634 KB | : 100% 1.0/1 [00:00<00:00, 5.33it/s] \nnumpy-1.17.2 | 5.2 MB | : 100% 1.0/1 [00:01<00:00, 1.08s/it] \n_libgcc_mutex-0.1 | 3 KB | : 100% 1.0/1 [00:00<00:00, 46.40it/s]\ndecorator-4.4.0 | 11 KB | : 100% 1.0/1 [00:00<00:00, 34.59it/s]\ncloudpickle-1.2.2 | 23 KB | : 100% 1.0/1 [00:00<00:00, 
34.29it/s]\nglog-0.4.0 | 104 KB | : 100% 1.0/1 [00:00<00:00, 22.23it/s]\nlibblas-3.8.0 | 10 KB | : 100% 1.0/1 [00:00<00:00, 36.48it/s]\npip-19.2.3 | 1.9 MB | : 100% 1.0/1 [00:00<00:00, 2.00it/s] \nlibstdcxx-ng-9.1.0 | 4.0 MB | : 100% 1.0/1 [00:00<00:00, 1.73it/s] \npython-3.6.7 | 34.6 MB | : 100% 1.0/1 [00:04<00:00, 4.76s/it] \ndistributed-2.3.2 | 370 KB | : 100% 1.0/1 [00:00<00:00, 6.35it/s] \nrequests-2.22.0 | 84 KB | : 100% 1.0/1 [00:00<00:00, 19.35it/s]\nlibpng-1.6.37 | 343 KB | : 100% 1.0/1 [00:00<00:00, 1.68it/s] \ndask-cuda-0.10.0a | 924 KB | : 100% 1.0/1 [00:01<00:00, 1.35s/it] \nblinker-1.4 | 13 KB | : 100% 1.0/1 [00:00<00:00, 37.71it/s]\nurllib3-1.25.3 | 187 KB | : 100% 1.0/1 [00:00<00:00, 10.96it/s]\npsutil-5.6.3 | 322 KB | : 100% 1.0/1 [00:00<00:00, 9.19it/s]\ndask-2.3.0 | 4 KB | : 100% 1.0/1 [00:00<00:00, 35.54it/s]\nlibgfortran-ng-7.3.0 | 1.3 MB | : 100% 1.0/1 [00:00<00:00, 4.35it/s] \nlibgcc-ng-9.1.0 | 8.1 MB | : 100% 1.0/1 [00:02<00:00, 2.50s/it] \nlibopenblas-0.3.7 | 7.6 MB | : 100% 1.0/1 [00:01<00:00, 1.24s/it] \npyasn1-0.4.6 | 52 KB | : 100% 1.0/1 [00:00<00:00, 19.46it/s]\nlibprotobuf-3.8.0 | 4.7 MB | : 100% 1.0/1 [00:00<00:00, 1.11it/s] \ndlpack-0.2 | 12 KB | : 100% 1.0/1 [00:00<00:00, 36.21it/s]\npyasn1-modules-0.2.6 | 47 KB | : 100% 1.0/1 [00:00<00:00, 17.25it/s]\nicu-64.2 | 12.6 MB | : 100% 1.0/1 [00:01<00:00, 1.80s/it] \ngoogle-auth-1.6.3 | 45 KB | : 100% 1.0/1 [00:00<00:00, 23.64it/s]\nxz-5.2.4 | 366 KB | : 100% 1.0/1 [00:00<00:00, 10.19it/s]\npycparser-2.19 | 173 KB | : 100% 1.0/1 [00:00<00:00, 13.79it/s]\nbokeh-1.3.4 | 4.0 MB | : 100% 1.0/1 [00:01<00:00, 1.30s/it] \nnumba-0.45.1 | 3.1 MB | : 100% 1.0/1 [00:01<00:00, 1.09s/it] \npandas-0.24.2 | 11.1 MB | : 100% 1.0/1 [00:02<00:00, 2.27s/it] \nzstd-1.4.0 | 928 KB | : 100% 1.0/1 [00:00<00:00, 6.59it/s] \npytz-2019.2 | 228 KB | : 100% 1.0/1 [00:00<00:00, 5.14it/s] \npackaging-19.0 | 23 KB | : 100% 1.0/1 [00:00<00:00, 21.72it/s]\ndask-core-2.3.0 | 574 KB | : 100% 1.0/1 [00:00<00:00, 5.36it/s] \nzlib-1.2.11 | 105 KB | : 100% 1.0/1 [00:00<00:00, 23.01it/s]\npyjwt-1.7.1 | 17 KB | : 100% 1.0/1 [00:00<00:00, 27.75it/s]\ndask-cudf-0.10.0a | 63 KB | : 100% 1.0/1 [00:00<00:00, 1.35it/s] \ngoogle-auth-oauthlib | 18 KB | : 100% 1.0/1 [00:00<00:00, 24.14it/s]\ntblib-1.4.0 | 12 KB | : 100% 1.0/1 [00:00<00:00, 30.64it/s]\ntoolz-0.10.0 | 46 KB | : 100% 1.0/1 [00:00<00:00, 3.28it/s] \nre2-2019.09.01 | 431 KB | : 100% 1.0/1 [00:00<00:00, 9.75it/s]\njpeg-9c | 251 KB | : 100% 1.0/1 [00:00<00:00, 11.74it/s]\nzict-1.0.0 | 10 KB | : 100% 1.0/1 [00:00<00:00, 29.23it/s]\nsqlite-3.29.0 | 1.9 MB | : 100% 1.0/1 [00:00<00:00, 3.44it/s] \nparquet-cpp-1.5.1 | 3 KB | : 100% 1.0/1 [00:00<00:00, 36.91it/s]\nnvstrings-0.10.0a | 125 KB | : 100% 1.0/1 [00:00<00:00, 2.18s/it] \nreadline-8.0 | 441 KB | : 100% 1.0/1 [00:00<00:00, 9.68it/s]\npyparsing-2.4.2 | 57 KB | : 100% 1.0/1 [00:00<00:00, 21.37it/s]\nasn1crypto-0.24.0 | 154 KB | : 100% 1.0/1 [00:00<00:00, 14.00it/s]\nlocket-0.2.0 | 6 KB | : 100% 1.0/1 [00:00<00:00, 12.44it/s]\nlibffi-3.2.1 | 46 KB | : 100% 1.0/1 [00:00<00:00, 25.57it/s]\nscikit-learn-0.21.3 | 6.7 MB | : 100% 1.0/1 [00:01<00:00, 1.34s/it] \npartd-1.0.0 | 16 KB | : 100% 1.0/1 [00:00<00:00, 5.73it/s] \ngcsfs-0.3.0 | 19 KB | : 100% 1.0/1 [00:00<00:00, 25.81it/s]\nrequests-oauthlib-1. 
| 19 KB | : 100% 1.0/1 [00:00<00:00, 31.33it/s]\npy-xgboost-0.90.rapi | 86 KB | : 100% 1.0/1 [00:00<00:00, 2.77it/s] \ntk-8.6.9 | 3.2 MB | : 100% 1.0/1 [00:00<00:00, 1.68it/s] \nlibtiff-4.0.10 | 587 KB | : 100% 1.0/1 [00:00<00:00, 8.19it/s] \ncugraph-0.10.0a | 1.3 MB | : 100% 1.0/1 [00:00<00:00, 1.30it/s] \njoblib-0.13.2 | 180 KB | : 100% 1.0/1 [00:00<00:00, 11.07it/s]\npyopenssl-19.0.0 | 81 KB | : 100% 1.0/1 [00:00<00:00, 23.61it/s]\nrmm-0.10.0a | 14 KB | : 100% 1.0/1 [00:00<00:00, 3.76it/s] \nbzip2-1.0.8 | 397 KB | : 100% 1.0/1 [00:00<00:00, 9.33it/s]\npysocks-1.7.0 | 26 KB | : 100% 1.0/1 [00:00<00:00, 29.84it/s]\nCopying shared object files to /usr/lib\n\n*********************************************\nYour Google Colab instance is RAPIDS ready!\n*********************************************\n"
]
],
[
[
"### Required Imports",
"_____no_output_____"
]
],
[
[
"import cudf\nimport pandas as pd\nimport numpy as np\nimport math\nfrom math import cos, sin, asin, sqrt, pi, atan2\nfrom numba import cuda\nimport time\nimport os\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.linear_model import LinearRegression\nimport cuml\nfrom cuml.linear_model import LinearRegression as LinearRegressionGPU\n\n\n\n%matplotlib inline\n\nprint('NumPy Version:', np.__version__)\nprint('Scikit-learn Version:', sklearn.__version__)\nprint('cuDF Version:', cudf.__version__)\nprint('cuML Version:', cuml.__version__)",
"NumPy Version: 1.16.5\nScikit-learn Version: 0.21.3\ncuDF Version: 0.10.0a+1233.gf8e8353\ncuML Version: 0.10.0a+456.gb96498b\n"
]
],
[
[
"### Scikit-Learn",
"_____no_output_____"
],
[
"Linear Regression\n\ny = 2.0 * x + 1.0",
"_____no_output_____"
]
],
[
[
"\nn_rows = 1000000\nw = 2.0\nx = np.random.normal(loc=0, scale=2, size=(n_rows,))\nb = 1.0\ny = w * x + b\n\n\nnoise = np.random.normal(loc=0, scale=2, size=(n_rows,))\ny_noisy = y + noise\n\ny_noisy[:5]",
"_____no_output_____"
],
[
"plt.scatter(x, y_noisy, label='empirical data points')\nplt.plot(x, y, color='black', label='true relatioship')\nplt.legend()",
"_____no_output_____"
],
[
"%%time\nlinear_regression = LinearRegression()\nlinear_regression.fit(np.expand_dims(x, 1), y)",
"CPU times: user 29.8 ms, sys: 0 ns, total: 29.8 ms\nWall time: 28.8 ms\n"
]
],
[
[
"Create new data and perform inference",
"_____no_output_____"
]
],
[
[
"inputs = np.linspace(start=-5, stop=5, num=1000000)",
"_____no_output_____"
],
[
"outputs = linear_regression.predict(np.expand_dims(inputs, 1))",
"_____no_output_____"
]
],
[
[
"Let's now visualize our empirical data points",
"_____no_output_____"
]
],
[
[
"plt.scatter(x, y_noisy, label='empirical data points')\nplt.plot(x, y, color='black', label='true relatioship')\nplt.plot(inputs, outputs, color='red', label='predict relationships (cpu)')\nplt.legend()",
"_____no_output_____"
],
[
"df = cudf.DataFrame({'x': x, 'y': y_noisy})\ndf.head(5)",
"_____no_output_____"
],
[
"%%time\n# instantiate and fit model\nlinear_regression_gpu = LinearRegressionGPU()\nlinear_regression_gpu.fit(df[['x']], df['y'])",
"CPU times: user 22.7 ms, sys: 17.1 ms, total: 39.9 ms\nWall time: 39 ms\n"
],
[
"new_data_df = cudf.DataFrame({'inputs': inputs})",
"_____no_output_____"
],
[
"outputs_gpu = linear_regression_gpu.predict(new_data_df[['inputs']])",
"_____no_output_____"
],
[
"plt.scatter(x, y_noisy, label='empirical data points')\nplt.plot(x, y, color='black', label='true relationship')\nplt.plot(inputs, outputs, color='red', label='predicted relationship (cpu)')\nplt.plot(inputs, outputs_gpu.to_array(), color='green', label='predicted relationship (gpu)')\nplt.legend()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e752b9d56e0f9a174acfc2c56a10dc9551a7e90e | 6,477 | ipynb | Jupyter Notebook | Models/Prepare Data For Predictive Modeling.ipynb | mbrady4/ClinicalTrialFinder-DS | 5167c399bef48918aeae858626ce81fdd1945a7e | [
"MIT"
] | null | null | null | Models/Prepare Data For Predictive Modeling.ipynb | mbrady4/ClinicalTrialFinder-DS | 5167c399bef48918aeae858626ce81fdd1945a7e | [
"MIT"
] | null | null | null | Models/Prepare Data For Predictive Modeling.ipynb | mbrady4/ClinicalTrialFinder-DS | 5167c399bef48918aeae858626ce81fdd1945a7e | [
"MIT"
] | null | null | null | 22.105802 | 88 | 0.503628 | [
[
[
"## Dependencies",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport psycopg2",
"_____no_output_____"
]
],
[
[
"## Connect to Database",
"_____no_output_____"
]
],
[
[
"dbname = 'aact'\nuser = 'postgres'\npassword = 'lqt38be'\nhost = 'localhost'\n\nconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\ncurs = conn.cursor()",
"_____no_output_____"
],
[
"# Verifying Connection\nquery = \"\"\"SELECT COUNT(*) \nFROM ctgov.studies;\n\"\"\"\ncurs.execute(query)\ncurs.fetchall()",
"_____no_output_____"
]
],
[
[
"## Load Studies Table",
"_____no_output_____"
]
],
[
[
"query = 'SELECT * FROM ctgov.studies'\nstudies = pd.read_sql(sql=query, con=conn)\nstudies.shape",
"_____no_output_____"
]
],
[
[
"## Split into Pred, Test, Val, and Train Sets",
"_____no_output_____"
]
],
[
[
"studies['overall_status'].value_counts()",
"_____no_output_____"
],
[
"active_status = ['Recruiting', 'Active, not recruiting', 'Not yet recruiting', \n 'Enrolling by invitation', 'Available', 'Approved for marketing']",
"_____no_output_____"
],
[
"pred_set = studies[ studies['overall_status'].isin(active_status) ]\npred_set.shape",
"_____no_output_____"
],
[
"inactive_status = ['Completed', 'Terminated', 'Withdrawn', 'Suspended']",
"_____no_output_____"
],
[
"inactive_set = studies[ studies['overall_status'].isin(inactive_status) ]\ninactive_set.shape",
"_____no_output_____"
],
[
"inactive_set = inactive_set.copy()\ninactive_set['completion'] = (inactive_set['overall_status'] == 'Completed')",
"_____no_output_____"
],
[
"inactive_set = inactive_set.drop(columns='overall_status')",
"_____no_output_____"
],
[
"y = inactive_set['completion']\nX = inactive_set.drop(columns='completion')",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=42)\n\nX_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42)\n\nX_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape",
"_____no_output_____"
],
[
"X_train.to_csv('X_train.csv', index=False)\nX_val.to_csv('X_val.csv', index=False)\nX_test.to_csv('X_test.csv', index=False)\ny_train.to_csv('y_train.csv', index=False, header=False)\ny_val.to_csv('y_val.csv', index=False, header=False)\ny_test.to_csv('y_test.csv', index=False, header=False)\npred_set.to_csv('pred_set.csv', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e752bb94d26eba689f42b93531ee20097ac67a63 | 334,903 | ipynb | Jupyter Notebook | src/New S&P Project.ipynb | lucky135/McDonalds-Nutritional-Facts | 546b8eadcb43f654199594f8cc2981a2e3368ae9 | [
"MIT"
] | null | null | null | src/New S&P Project.ipynb | lucky135/McDonalds-Nutritional-Facts | 546b8eadcb43f654199594f8cc2981a2e3368ae9 | [
"MIT"
] | null | null | null | src/New S&P Project.ipynb | lucky135/McDonalds-Nutritional-Facts | 546b8eadcb43f654199594f8cc2981a2e3368ae9 | [
"MIT"
] | 1 | 2020-10-20T03:16:04.000Z | 2020-10-20T03:16:04.000Z | 98.587872 | 41,624 | 0.716557 | [
[
[
"# Mc Donalds - Nutritional Facts",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport thinkplot\nimport thinkstats2",
"_____no_output_____"
],
[
"result=pd.read_csv('menu.csv')",
"_____no_output_____"
]
],
[
[
"## Analyzing Data Frame",
"_____no_output_____"
]
],
[
[
"result.head()",
"_____no_output_____"
],
[
"result.tail()",
"_____no_output_____"
],
[
"result.describe()",
"_____no_output_____"
],
[
"result.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 260 entries, 0 to 259\nData columns (total 24 columns):\nCategory 260 non-null object\nItem 260 non-null object\nServing Size 260 non-null object\nCalories 260 non-null int64\nCalories from Fat 260 non-null int64\nTotal Fat 260 non-null float64\nTotal Fat (% Daily Value) 260 non-null int64\nSaturated Fat 260 non-null float64\nSaturated Fat (% Daily Value) 260 non-null int64\nTrans Fat 260 non-null float64\nCholesterol 260 non-null int64\nCholesterol (% Daily Value) 260 non-null int64\nSodium 260 non-null int64\nSodium (% Daily Value) 260 non-null int64\nCarbohydrates 260 non-null int64\nCarbohydrates (% Daily Value) 260 non-null int64\nDietary Fiber 260 non-null int64\nDietary Fiber (% Daily Value) 260 non-null int64\nSugars 260 non-null int64\nProtein 260 non-null int64\nVitamin A (% Daily Value) 260 non-null int64\nVitamin C (% Daily Value) 260 non-null int64\nCalcium (% Daily Value) 260 non-null int64\nIron (% Daily Value) 260 non-null int64\ndtypes: float64(3), int64(18), object(3)\nmemory usage: 48.8+ KB\n"
],
[
"print(\"Columns in the data frame : \",result.columns)",
"Columns in the data frame : Index(['Category', 'Item', 'Serving Size', 'Calories', 'Calories from Fat',\n 'Total Fat', 'Total Fat (% Daily Value)', 'Saturated Fat',\n 'Saturated Fat (% Daily Value)', 'Trans Fat', 'Cholesterol',\n 'Cholesterol (% Daily Value)', 'Sodium', 'Sodium (% Daily Value)',\n 'Carbohydrates', 'Carbohydrates (% Daily Value)', 'Dietary Fiber',\n 'Dietary Fiber (% Daily Value)', 'Sugars', 'Protein',\n 'Vitamin A (% Daily Value)', 'Vitamin C (% Daily Value)',\n 'Calcium (% Daily Value)', 'Iron (% Daily Value)'],\n dtype='object')\n"
],
[
"print(\"Shape : \",result.shape)",
"Shape : (260, 24)\n"
],
[
"result.isnull().any()",
"_____no_output_____"
]
],
[
[
"### Hence there are no null values in the Data.",
"_____no_output_____"
],
[
"## Let's study how many calories each food category contains. Therefore helping the health conscious people to select the perfect combination :-)",
"_____no_output_____"
]
],
[
[
"# Getting total number of calories for each food item in a separate column\nresult['Total Calories']=result['Calories']+result['Calories from Fat']",
"_____no_output_____"
],
[
"result['Total Calories'].head()",
"_____no_output_____"
],
[
"# Rounding Off the calories to nearest hundred so that data can be handled and analyzed easily\ndef roundOff(x):\n if x==0:\n x=0\n elif x<100:\n x=50\n else:\n y=x%100\n if y<50:\n x=x-y\n else:\n x=x-y+100 \n return x\n\nresult['Estimated Calories']=result['Total Calories'].apply(roundOff)\nresult['Estimated Calories'][:5]",
"_____no_output_____"
],
[
"x1=result.groupby('Category').agg(lambda x:x.value_counts().index[0])\nsns.barplot(x=x1['Estimated Calories'].index,y=x1['Estimated Calories'].values)\nplt.xticks(rotation=90)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### From the above graph its depicted that food items in 'Breakfast','Chicken & Fish' and 'Smoothies and Shakes' category contains the maximum amount of calories whereas amount of calories in 'Snacks & Sides','Salads' and 'Beef & Pork' are in a moderate amount while the lowest calories are in the food items that covers the 'Coffee & Tea' and 'Deserts' category.",
"_____no_output_____"
],
[
"## Applying PMF on Estimated Calories to analyze the trend.",
"_____no_output_____"
]
],
[
[
"pmf=thinkstats2.Pmf(result['Estimated Calories'])\npmf",
"_____no_output_____"
],
[
"plt.figure(figsize=(14,9))\nthinkplot.Pmf(pmf,color='green')",
"_____no_output_____"
]
],
[
[
"### From the graph we can see that most of the food items contain about 500 calories and majorly the range extends between 250 - 1000. Further, very less food items have calories more than 1200.",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"## Analyzing data on the basis of sugar present in the food items. Sugar free components have been removed",
"_____no_output_____"
]
],
[
[
"# Removing the sugar free food items\nsugar_plot=pd.DataFrame(result[result['Sugars']!=0])\nsugar_plot.head()",
"_____no_output_____"
],
[
"# Rounding Sugar Content to nearest multiple of 5\ndef roundSugar(x):\n if x<5:\n x=5\n else:\n check=x%5\n if check==0:\n x=x\n elif check<3:\n x=x-check\n else:\n x=x+5-check\n return x \n \nsugar_plot['Sugars']=sugar_plot['Sugars'].apply(roundSugar)\nsugar_plot['Sugars'][:5]",
"_____no_output_____"
],
[
"pmf=thinkstats2.Pmf(sugar_plot['Sugars'])\npmf",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,9))\nthinkplot.Pmf(pmf,color='red')",
"_____no_output_____"
]
],
[
[
"### After removing the sugar free food items, here we can see that about 25% of the food items include about 5g of sugar. That seems pretty good considering the amount of calories we take in at the Mc Donalds",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"## Now the people who go to gym need loads of proteins so let's see if they can have a great time at Mc Donalds and also complete their protein requirements.",
"_____no_output_____"
]
],
[
[
"# Rounding the Protein Content to nearest multiple of 5\ndef roundProtein(x):\n if x<5:\n x=5\n else:\n check=x%5\n if check==0:\n x=x\n elif check<3:\n x=x-check\n else:\n x=x+5-check\n return x\n\nresult['Protein']=result['Protein'].apply(roundProtein)\nresult['Protein'][:5]",
"_____no_output_____"
],
[
"pmf=thinkstats2.Pmf(result['Protein'])\npmf",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,9))\nthinkplot.Pmf(pmf,color='purple')",
"_____no_output_____"
]
],
[
[
"### Most of the food items contain 0 to 20 grams of protein. Well that's not satisfactory considering the amount of calories it gives. But still few food items majorly fish and meat give more protein than others and are preferred.",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"### According to the study provided by World Health Organisation, 1500mg of Sodium intake is required per day. Let's see how much is required to fulfill the Sodium requirement of customers.",
"_____no_output_____"
]
],
[
[
"plot=sns.swarmplot(x=\"Category\", y=\"Sodium\", data=result)\nplt.setp(plot.get_xticklabels(), rotation=45)\nplt.title(\"Sodium Intake\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"As seen in the graph,in an overall scenario the maximum Sodium is consumed by the customers during Breakfast. Let's see what meals can be avoided occasionally and what meals can be preferred.",
"_____no_output_____"
]
],
[
[
"x=result[result['Category']=='Breakfast']\nx",
"_____no_output_____"
],
[
"print('List of food items with high sodium intake consumed during breakfast: ')\nx[x['Sodium']>1500]['Item']",
"List of food items with high sodium intake consumed during breakfast: \n"
],
[
"print('List of food items with moderate to low Sodium intake: ')\nx[x['Sodium']<=1500]['Item']",
"List of food items with moderate to low Sodium intake: \n"
]
],
[
[
"### Analysing the healthy nutritional facts of the menu",
"_____no_output_____"
]
],
[
[
"health=['Dietary Fiber','Iron (% Daily Value)','Vitamin A (% Daily Value)','Vitamin C (% Daily Value)','Calcium (% Daily Value)']\nfor x in health:\n sns.barplot(x='Category',y=x,data=result)\n plt.xticks(rotation=90)\n plt.show()",
"C:\\Users\\laksh\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"print(\"Item with high Dietary Fiber: \",result.Item[result['Dietary Fiber']].max())\nprint(\"Item with high Calcium: \",result.Item[result['Calcium (% Daily Value)']].max())\nprint(\"Item with high Iron content: \",result.Item[result['Iron (% Daily Value)']].max())\nprint(\"Item with adequate vitamin A: \",result.Item[result['Vitamin A (% Daily Value)']].max())\nprint(\"Item with adequate vitamin C: \",result.Item[result['Vitamin C (% Daily Value)']].max())",
"Item with high Dietary Fiber: Steak & Egg McMuffin\nItem with high Calcium: Sausage McMuffin with Egg Whites\nItem with high Iron content: Sausage McMuffin with Egg Whites\nItem with adequate vitamin A: Side Salad\nItem with adequate vitamin C: Side Salad\n"
],
[
"sns.jointplot(x='Carbohydrates',y='Cholesterol',data=result,kind='reg')",
"C:\\Users\\laksh\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"result.Item[(result['Carbohydrates']>=25) & (result['Carbohydrates']<=77) & (result['Cholesterol']<=100)]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e752c23a4899fcaeab6f792284ff62d80928f4a5 | 57,026 | ipynb | Jupyter Notebook | docs/notebooks/visualization/loading.ipynb | Jwright707/brainl | 5ff1b14c1a5bd56f7003edc28c5399d6b67a09f5 | [
"Apache-2.0"
] | null | null | null | docs/notebooks/visualization/loading.ipynb | Jwright707/brainl | 5ff1b14c1a5bd56f7003edc28c5399d6b67a09f5 | [
"Apache-2.0"
] | null | null | null | docs/notebooks/visualization/loading.ipynb | Jwright707/brainl | 5ff1b14c1a5bd56f7003edc28c5399d6b67a09f5 | [
"Apache-2.0"
] | null | null | null | 69.290401 | 1,866 | 0.609862 | [
[
[
"# Loading neurons from s3",
"_____no_output_____"
]
],
[
[
"import napari\n%gui qt5",
"_____no_output_____"
],
[
"import brainlit\nfrom brainlit.utils.ngl_pipeline import NeuroglancerSession\nfrom brainlit.viz.swc import *\nimport numpy as np\nfrom skimage import io",
"/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/python_jsonschema_objects/__init__.py:53: UserWarning: Schema version http://json-schema.org/draft-04/schema not recognized. Some keywords and features may not be supported.\n self.schema[\"$schema\"]\n"
]
],
[
[
"## Loading entire neuron from AWS \n",
"_____no_output_____"
],
[
"`napari.components.viewer_model.ViewerModel.add_swc` does this via the following functions in the `napari.layers.swc.swc` module\n1. `swc.read_s3` to read the s3 file into a pd.DataFrame\n2. `swc.read_swc` to read the swc file into a pd.DataFrame\n3. `swc.generate_df_subset` creates a smaller subset of the original dataframe with coordinates in img space\n4. `swc.swc_to_voxel` to convert the coordinates from spatial to voxel coordinates\n5. `swc.df_to_graph` to convert the DataFrame into a netwrokx.DiGraph\n6. `swc.graph_to_paths` to convert from a graph into a list of paths\n7. `ViewerModel.add_shapes` to add the paths as a shape layer into the napari viewer",
"_____no_output_____"
],
[
"### 1. `read_s3`\nThis function parses the swc file into a pd.DataFrame. Each row is a vertex in the swc file with the following information: \n\n`sample number`\n\n`structure identifier`\n\n`x coordinate`\n\n`y coordinate`\n\n`z coordinate`\n\n`radius of dendrite`\n\n`sample number of parent`\n\nThe coordinates are given in spatial units of micrometers ([swc specification](http://www.neuronland.org/NLMorphologyConverter/MorphologyFormats/SWC/Spec.html))",
"_____no_output_____"
]
],
[
[
"s3_path = \"s3://open-neurodata/brainlit/brain1_segments\"\nseg_id = 2\nmip = 1\ndf = read_s3(s3_path, seg_id, mip)\ndf.head()",
"Downloading: 100%|██████████| 1/1 [00:00<00:00, 16.27it/s]\n"
]
],
[
[
"### 2. `swc.read_swc`\nThis function parses the swc file into a pd.DataFrame. Each row is a vertex in the swc file with the following information: \n\n`sample number`\n\n`structure identifier`\n\n`x coordinate`\n\n`y coordinate`\n\n`z coordinate`\n\n`radius of dendrite`\n\n`sample number of parent`\n\nThe coordinates are given in spatial units of micrometers ([swc specification](http://www.neuronland.org/NLMorphologyConverter/MorphologyFormats/SWC/Spec.html))",
"_____no_output_____"
]
],
[
[
"consen_neuron_path = '2018-08-01_G-002_consensus.swc'\n\ndf = read_swc(swc_path=consen_neuron_path)\ndf.head()",
"_____no_output_____"
]
],
[
[
"### 3. `generate_df_subset`\nThis function parses the swc file into a pd.DataFrame. Each row is a vertex in the swc file with the following information: \n\n`sample number`\n\n`structure identifier`\n\n`x coordinate`\n\n`y coordinate`\n\n`z coordinate`\n\n`radius of dendrite`\n\n`sample number of parent`\n\nThe coordinates are given in same spatial units as the image file when using `ngl.pull_vertex_list`",
"_____no_output_____"
]
],
[
[
"# Choose vertices to use for the subneuron\nsubneuron_df = df[0:3] \nvertex_list = subneuron_df['sample'].array \n\n# Define a neuroglancer session\nurl = \"s3://open-neurodata/brainlit/brain1\"\nmip = 1\nngl = NeuroglancerSession(url, mip=mip)\n\n# Get vertices\nseg_id = 2\nbuffer = [10, 10, 10]\nimg, bounds, vox_in_img_list = ngl.pull_vertex_list(seg_id, vertex_list, buffer = buffer, expand = True)\n\ndf_subneuron = generate_df_subset(subneuron_df,vox_in_img_list)\nprint(df_subneuron)",
"Downloading: 100%|██████████| 1/1 [00:00<00:00, 42.07it/s]\nDownloading: 100%|██████████| 1/1 [00:00<00:00, 42.80it/s]\nDownloading: 100%|██████████| 1/1 [00:00<00:00, 37.35it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\nDownloading: 0%| | 0/1 [00:00<?, ?it/s]\n"
]
],
[
[
"### 4. `swc_to_voxel`\n\nIf we want to overlay the swc file with a corresponding image, we need to make sure that they are in the same coordinate space. Because an image in an array of voxels, it makes sense to convert the vertices in the dataframe from spatial units into voxel units.\n\nGiven the `spacing` (spatial units/voxel) and `origin` (spatial units) of the image, `swc_to_voxel` does the conversion by using the following equation:\n\n$voxel = \\frac{spatial - origin}{spacing}$",
"_____no_output_____"
]
],
[
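[
"# Illustrative sketch only -- not brainlit's swc_to_voxel implementation.\n# It applies the formula above to one (x, y, z) point; rounding to the nearest\n# integer voxel index is an assumption made for this example.\ndef to_voxel_sketch(spatial_xyz, origin, spacing):\n    return np.round((np.asarray(spatial_xyz) - origin) / spacing).astype(int)",
"_____no_output_____"
],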
[
"spacing = np.array([0.29875923,0.3044159,0.98840415])\norigin = np.array([70093.276,15071.596,29306.737])\n\ndf_voxel = swc_to_voxel(df=df, spacing=spacing, origin=origin)\ndf_voxel.head()",
"_____no_output_____"
]
],
[
[
"### 5. `df_to_graph`\nA neuron is a graph with no cycles (tree). While napari does not support displaying graph objects, it can display multiple paths. \n\nThe DataFrame already contains all the possible edges in the neurons. Each row in the DataFrame is an edge. For example, from the above we can see that `sample 2` has `parent 1`, which represents edge `(1,2)`. `sample 1` having `parent -1` means that `sample 1` is the root of the tree.\n\n`swc.df_to_graph` reads DataFrame and converts it into a networkx directional graph.",
"_____no_output_____"
]
],
[
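[
"# Illustrative sketch only -- not brainlit's df_to_graph implementation.\n# Each row becomes a node keyed by its sample number, and every row whose parent\n# is not -1 contributes a directed edge (parent, sample), as described above.\n# Column names ('sample', 'parent', 'x', 'y', 'z') are assumed from the swc format.\nimport networkx as nx\n\ndef df_to_graph_sketch(df_voxel):\n    G_sketch = nx.DiGraph()\n    for _, row in df_voxel.iterrows():\n        G_sketch.add_node(row['sample'], x=row['x'], y=row['y'], z=row['z'])\n        if row['parent'] != -1:\n            G_sketch.add_edge(row['parent'], row['sample'])\n    return G_sketch",
"_____no_output_____"
],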
[
"G = df_to_graph(df)\nprint('Number of nodes:', len(G.nodes))\nprint('Number of edges:', len(G.edges))\nprint('\\n')\nprint('Sample 1 coordinates (x,y,z)')\nprint(G.nodes[1]['x'],G.nodes[1]['y'],G.nodes[1]['z'])",
"Number of nodes: 1650\nNumber of edges: 1649\n\n\nSample 1 coordinates (x,y,z)\n4713 4470 3857\n"
]
],
[
[
"### 6. `graph_to_paths`\nThis function takes in a graph and returns a list of non-overlapping paths. The union of the paths forms the graph.\n\nThe algorithm works by:\n\n1. Find longest path in the graph ([networkx.algorithms.dag.dag_longest_path](https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.dag.dag_longest_path.html))\n2. Remove longest path from graph\n3. Repeat steps 1 and 2 until there are no more edges left in the graph",
"_____no_output_____"
]
],
[
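[
"# Illustrative sketch only -- not brainlit's graph_to_paths implementation.\n# It shows the decomposition described above: repeatedly take the longest remaining\n# path and strip its edges until no edges are left.\n# (The library's version also carries the x, y, z coordinates along each path for napari.)\nimport networkx as nx\n\ndef paths_sketch(G):\n    H = G.copy()\n    node_paths = []\n    while H.number_of_edges() > 0:\n        longest = nx.dag_longest_path(H)\n        node_paths.append(longest)\n        H.remove_edges_from(list(zip(longest[:-1], longest[1:])))\n    return node_paths",
"_____no_output_____"
],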
[
"paths = graph_to_paths(G=G)\nprint(f\"The graph was decomposed into {len(paths)} paths\")",
"The graph was decomposed into 179 paths\n"
]
],
[
[
"### 6. `ViewerModel.add_shapes`\nnapari displays \"layers\". The most common layer is the image layer. In order to display the neuron, we use `path` from the [shapes](https://napari.org/tutorials/shapes) layer",
"_____no_output_____"
]
],
[
[
"viewer = napari.Viewer(ndisplay=3)\nviewer.add_shapes(data=paths, shape_type='path', edge_color='white', name='Skeleton 2')",
"_____no_output_____"
]
],
[
[
"## Loading sub-neuron\n\nThe image of the entire brain has dimensions of (33792, 25600, 13312) voxels. G-002 spans a sub-image of (7386, 9932, 5383) voxels. Both are too big to load in napari and overlay the neuron.\nTo circumvent this, we can crop out a smaller region of the neuron, load the sub-neuron, and load the corresponding sub-image.\n\nIn order to get a sub-neuron, we need to specify the `bounding_box` that will be used to crop the neuron. `bounding_box` is a length 2 tuple. The first element is one corner of the bounding box (inclusive) and the second element is the opposite corner of the bounding box (exclusive). Both corners are in voxel units.\n\n`add_swc` can do all of this automatically when given `bounding_box` by following these steps:\n\n1. `read_s3` to read the swc file into a pd.DataFrame\n2. `swc_to_voxel` to convert the coordinates from spatial to voxel coordinates\n3. `df_to_graph` to convert the DataFrame into a netwrokx.DiGraph\n**3.1 `swc.get_sub_neuron` to crop the graph by `bounding_box`**\n4. `graph_to_paths` to convert from a graph into a list of paths\n5. `ViewerModel.add_shapes` to add the paths as a shape layer into the napari viewer",
"_____no_output_____"
],
[
"### 7. `get_sub_neuron`\nThis function crops a graph by removing edges. It removes edges that do not intersect the bounding box.\n\nEdges that intersect the bounding box will have at least one of its vertices be contained by the bounding box. The algorithm follows this principle by checking the neighborhood of vertices.\n\nFor each vertex *v* in the graph:\n\n1. Find vertices belonging to local neighborhood of *v*\n2. If vertex *v* or any of its local neighborhood vertices are in the bounding box, do nothing. Otherwise, remove vertex *v* and its edges from the graph\n\nWe check the neighborhood of *v* along with *v* because we want the sub-neuron to show all edges that pass through the bounding box, including edges that are only partially contained.",
"_____no_output_____"
]
],
[
[
"# Create an NGL session to get the bounding box\nngl_sess = NeuroglancerSession(mip = 1)\nimg, bbbox, vox = ngl_sess.pull_chunk(2, 300, 1, 1, 1)\nbbox = bbbox.to_list()\nbox = (bbox[:3], bbox[3:])\nprint(box)",
"Downloading: 100%|██████████| 1/1 [00:00<00:00, 22.80it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\nDownloading: 0%| | 0/3 [00:00<?, ?it/s]\n"
],
[
"G_sub = get_sub_neuron(G, box)\npaths_sub = graph_to_paths(G_sub)\nviewer = napari.Viewer(ndisplay=3)\nviewer.add_shapes(data=paths_sub, shape_type='path', edge_color='blue', name='sub-neuron')\n\n# overlay corresponding image\nimage_path = 'G-002_15312-4400-6448_15840-4800-6656.tif'\nimg_comp = io.imread(image_path)\nimg_comp = np.swapaxes(img_comp,0,2)\n\nviewer.add_image(img_comp)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e752c447055c848b3f4926338721957850169074 | 287,414 | ipynb | Jupyter Notebook | reactors/1D_pfr_surfchem.ipynb | santoshshanbhogue/cantera-jupyter | 8354fade21507b556bf5fff109dbb067fd8b8953 | [
"BSD-3-Clause"
] | 73 | 2016-08-30T18:41:48.000Z | 2022-03-27T12:32:08.000Z | reactors/1D_pfr_surfchem.ipynb | santoshshanbhogue/cantera-jupyter | 8354fade21507b556bf5fff109dbb067fd8b8953 | [
"BSD-3-Clause"
] | 27 | 2016-08-10T22:17:22.000Z | 2022-03-19T19:10:52.000Z | reactors/1D_pfr_surfchem.ipynb | santoshshanbhogue/cantera-jupyter | 8354fade21507b556bf5fff109dbb067fd8b8953 | [
"BSD-3-Clause"
] | 53 | 2016-08-01T23:06:47.000Z | 2022-03-18T15:19:11.000Z | 378.176316 | 139,188 | 0.924078 | [
[
[
"# 1D Plug Flow Reactor Model with Surface Chemistry",
"_____no_output_____"
],
[
"In this model, we will illustrate the derivation of the governing differential equations and algebraic constraints, calculation of the initial conditions of the variables and their spatial derivatives and use the [scikits.odes.dae](http://scikits-odes.readthedocs.io/en/latest/guide.html#object-oriented-interface-ode-and-dae) IDA solver to solve this system of differential algebraic equations (DAE).",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function, division\nimport numpy as np\nfrom scikits.odes import dae\nimport cantera as ct\nimport matplotlib.pyplot as plt\n%matplotlib inline\nprint('Runnning Cantera version: ' + ct.__version__)",
"Runnning Cantera version: 2.4.0\n"
]
],
[
[
"## Define gas species, bulk species, surface species and the interface",
"_____no_output_____"
],
[
"Here, we use a kinetic mechanism involving the chemical vapor deposition of silicon nitride (Si<sub>3</sub>N<sub>4</sub>) from SiF<sub>4</sub> and NH<sub>3</sub>. 25 gas species, 6 surface species and 2 bulk species mechanism is applied by [Richard S. Larson et al. 1996, SAND96-8211](https://github.com/yuj056/yuj056.github.io/blob/master/_posts/Sandia.pdf).",
"_____no_output_____"
]
],
[
[
"#import the SiF4 + NH3 reaction mechanism\nmech = 'data/SiF4_NH3_mec.cti'\n#import the models for gas and bulk\ngas, bulk_Si, bulk_N = ct.import_phases(mech, ['gas', 'SiBulk', 'NBulk'])\n#import the model for gas-Si-N interface\ngas_Si_N_interface = ct.Interface(mech, 'SI3N4', [gas,bulk_Si,bulk_N])",
"_____no_output_____"
]
],
[
[
"# Case 1: isothermal reactor\n## Define reactor conditions : temperature, pressure, fuel, and some important parameters",
"_____no_output_____"
]
],
[
[
"T0 = 1713 # Kelvin\np0 = 2 * ct.one_atm / 760.0 # Pa ~2 Torr\ngas.TPX = T0, p0, \"NH3:6, SiF4:1\"\nbulk_Si.TP = T0, p0\nbulk_N.TP = T0, p0\ngas_Si_N_interface.TP = T0, p0\nD = 5.08e-2 # diameter of the tube [m]\nAc = np.pi * D**2/4 # cross section of the tube [m^2]\nmu = 5.7e-5 # kg/(m-s) dynamic viscosity\nperim = np.pi * D # perimeter of the tube\n# calculate the site fractions of surface species at the entrance of the tube at steady state\ngas_Si_N_interface.advance_coverages(100.0) # Here we assume after 100s, the system reaches the steady state\nZk_0 = gas_Si_N_interface.coverages\nN = gas.n_species # number of gas species\nM = gas_Si_N_interface.n_species # number of surface species",
"_____no_output_____"
]
],
[
[
"## Define a residual function for IDA solver",
"_____no_output_____"
],
[
"For the isothermal tube with laminar flow, since the temperature of the flow and tube is constant, the energy conservation equation can be ignored. The governing equations include conservation of mass and species, momentum equation, equation of state, and the algebraic constraints that the net production rate of surface species by heterogeneous reactions are zero and that the sum of site fractions equals 1.\n\nHere we define a residual function, an equation which should always evaluate to the zero vector, as the input of IDA solver, which listed as follows:",
"_____no_output_____"
]
],
[
[
"%%latex\n\\begin{align}\n R[0] &= u\\frac{d\\rho}{dz} + \\rho\\frac{du}{dz} - \\frac{p'}{A_c}\\sum^{K_g}\\dot{s}_{k,g}W_{k,g} \\\\\n R[1] &= \\rho u A_c\\frac{dY_k}{dz} + Y_k p'\\sum^{K_g}\\dot{s}_{k,g}W_{k,g} - \\dot{\\omega_k}W_kA_c - \\dot{s}_{k,g}W_{k,g} p' \\\\\n R[2] &= 2\\rho u \\frac{du}{dz} + u^2\\frac{d\\rho}{dz} + \\frac{dP}{dz} + \\frac{32u\\mu}{D^2}\\\\\n R[3] &= P\\bar{W} - \\rho RT\\\\\n R[4] &= \\dot{s}_{k,s} \\\\\n R[5] &= \\sum_{phase}{Z_{k,s}} - 1\n\\end{align} ",
"_____no_output_____"
]
],
[
[
"The detailed derivation of the DAE system can be found in [my report](https://github.com/yuj056/yuj056.github.io/blob/master/Week1/yuj056_github_io.pdf).",
"_____no_output_____"
]
],
[
[
"def residual(z, vec, vecp, result):\n \"\"\" we create the residual equations for the problem\n vec = [u, rho, Yk, p, Zk]\n vecp = [dudz, drhodz, dYkdz, dpdz, dZkdz]\n \"\"\"\n # temporary variables\n u = vec[0] # velocity\n rho = vec[1] # density\n Y = vec[2:2+N] # vector of mass fractions of all gas species\n p = vec[2+N] # pressure\n Z = vec[3+N:] # vector of site fractions of all surface species \n\n dudz = vecp[0] # velocity spatial derivative\n drhodz = vecp[1] # density spatial derivative\n dYdz = vecp[2:2+N] # mass fraction spatial derivative\n dpdz = vecp[2+N] # pressure spatial derivative\n\n # Use unnormalized mass fractions to avoid over-constraining the system\n gas.set_unnormalized_mass_fractions(Y)\n gas.TP = T0,p\n\n bulk_Si.TP = T0,p\n bulk_N.TP = T0,p\n\n # Use unnormalized site fractions (coverages) to avoid over-constraining the system\n gas_Si_N_interface.set_unnormalized_coverages(Z)\n gas_Si_N_interface.TP = T0,p\n\n # temporary variables (based on the given state)\n coverages = gas_Si_N_interface.coverages # site fraction vector\n sdot_g = gas_Si_N_interface.net_production_rates[:N] # heterogeneous production rate of gas species\n sdot_s = gas_Si_N_interface.net_production_rates[-M:]\n wdot_g = gas.net_production_rates # homogeneous production rate of gas species\n W_g = gas.molecular_weights # vector of molecular weight of gas species\n\n # mass continuity equation\n result[0] = u*drhodz + rho*dudz - perim*np.sum(sdot_g*W_g)/Ac\n\n # conservation of species\n for k in range(N):\n result[1+k] = (rho*u*Ac*dYdz[k] + Y[k]*perim*np.sum(sdot_g*W_g)\n - wdot_g[k]*W_g[k]*Ac\n - sdot_g[k]*W_g[k]*perim)\n # conservation of momentum\n result[1+N] = 2*rho*u*dudz + np.power(u,2)*drhodz + dpdz + 32*u*mu/D**2 \n\n # equation of state\n result[2+N] = gas.density - rho\n\n # algebraic constraints\n for j in range(M):\n result[3+N+j] = sdot_s[j]\n\n # replace the constraint with the condition sum(Zk) = 1 for the largest site fraction species\n index = np.argmax(coverages)\n result[3+N+index] = np.sum(coverages) - 1",
"_____no_output_____"
]
],
[
[
"## Determine the initial values of the spatial derivatives of the unknowns which need to be used as the initial conditions for the IDA solver",
"_____no_output_____"
],
[
"The following linear equation system has been solved by [np.linalg.solve](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.solve.html), a linear solver, to calculate the initial values of the spatial derivatives of the unknowns.",
"_____no_output_____"
]
],
[
[
"%%latex\n\\begin{align}\n u_0\\rho_0' + \\rho_0 u_0' - \\frac{p'}{A_c}\\sum^{K_g}\\dot{s}_{k,g}W_{k,g} &= 0\\\\\n \\rho_0 u_0 A_c Y_{k,0}' + Y_{k,0} p'\\sum^{K_g}\\dot{s}_{k,g}W_{k,g} - \\dot{\\omega_k}W_kA_c - \\dot{s}_{k,g}W_{k,g} p' &=0 \\\\\n 2\\rho_0 u_0 u_0' + u_0^2\\rho_0' + P_0' + \\frac{32u_0 \\mu}{D^2} &=0\\\\\n -RT\\rho_0' + \\bar{W_0}P_0' - P_0\\frac{\\sum^{K_g}Y_{k}'/W_{k,g}}{(\\sum^{K_g}Y_{k}/W_{k,g})^2} &= 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"We assume the derivatives of the site fractions are equal to zero, although it is trivial for the IDA solver.",
"_____no_output_____"
]
],
[
[
"######## Solve linear system for the initial vecp ###########\n\"\"\"\n a = coefficient of [u', rho', Yk', P']\n b = RHS constant of each conservation equations\n\"\"\"\nrho0 = gas.density # initial density of the flow\nu0 = 11.53 # m/s initial velocity of the flow\nW = gas.molecular_weights\nW_avg = gas.mean_molecular_weight\nsdot = gas_Si_N_interface.net_production_rates # heterogeneous molar production rate\nwdot = gas.net_production_rates # homogeneous molar production rate\n################### a #########################\na = np.zeros((3+N,3+N))\na[0,:] = np.hstack((rho0, u0, np.zeros(1+N)))\nfor i in range(N):\n a[1+i,2+i] = rho0*u0*Ac\na[1+N,:] = np.hstack((2*rho0*u0, u0**2, np.zeros(N), 1))\ncoef = np.zeros(N)\nfor j in range(N):\n coef[j] = gas.P/W[j]/np.power(np.sum(gas.Y/W),2)\na[2+N,:] = np.hstack((0, ct.gas_constant*T0, coef, -W_avg))\n################### b ###########################\nb = np.zeros(3+gas.n_species)\nb[0] = perim*np.sum(sdot[:N]*W)/Ac\nfor i in range(gas.n_species):\n b[1+i] = (wdot[i]*W[i]*Ac\n + sdot[i]*W[i]*perim\n - gas.Y[i]*perim*np.sum(sdot[:N]*W))\nb[1+gas.n_species] = -32*u0*mu/D**2\nb[2+gas.n_species] = 0\npart_vecp0 = np.linalg.solve(a,b)\n\nvecp0 = np.hstack((part_vecp0, np.zeros(M)))\nvec0 = np.hstack((11.53, gas.density, gas.Y, gas.P, Zk_0))",
"_____no_output_____"
]
],
[
[
"## Run the IDA solver to calculate the unknowns varying in the flow direction",
"_____no_output_____"
]
],
[
[
"solver = dae(\n 'ida',\n residual, \n first_step_size=1e-16,\n atol=1e-8, # absolute tolerance for solution\n rtol=1e-8, # relative tolerance for solution\n # If the given problem is of type DAE, some items of the residual vector\n # returned by the 'resfn' have to be treated as algebraic equations, and\n # algebraic variables must be defined. These algebraic variables are\n # denoted by the position (index) in the state vector y. All these\n # indexes have to be specified in the 'algebraic_vars_idx' array.\n algebraic_vars_idx=[np.arange(3+N,3+N+M,1)], \n max_steps=5000,\n old_api=False # Forces use of new api (namedtuple)\n)\n\ntimes = np.arange(0,0.7,0.01)\nsolution = solver.solve(times, vec0, vecp0)",
"_____no_output_____"
]
],
[
[
"## Plot the results",
"_____no_output_____"
]
],
[
[
"# plot velocity of gas along the flow direction\nf, ax = plt.subplots(3,2, figsize=(9,9), dpi=96)\nax[0,0].plot(times, solution.values.y[:,0], color='C0')\nax[0,0].set_xlabel('Distance (m)')\nax[0,0].set_ylabel('Velocity (m/s)')\n\n# plot gas density along the flow direction\nax[0,1].plot(times, solution.values.y[:,1], color='C1')\nax[0,1].set_xlabel('Distance (m)')\nax[0,1].set_ylabel('Density ($\\mathregular{kg/m^3}$)')\nax[0,1].ticklabel_format(axis='y', style='sci', scilimits=(-2,2)) # scientific notation\n\n# plot major and minor gas species separately\nminor_idx = []\nmajor_idx = []\nfor i,name in enumerate(gas.species_names): \n mean = np.mean(solution.values.y[:,2+i])\n if mean <= 0.01:\n minor_idx.append(i) \n else:\n major_idx.append(i)\n\n# plot minor species\nfor i in minor_idx:\n style = '-' if i < 10 else '--' \n ax[1,0].plot(times, solution.values.y[:,2+i], label=gas.species_names[i], linestyle=style)\nax[1,0].legend(fontsize=7, loc='upper right')\nax[1,0].set_xlabel('Distance (m)')\nax[1,0].set_ylabel('Mass Fraction')\nax[1,0].ticklabel_format(axis='y', style='sci', scilimits=(-2,2)) # scientific notation\n\n# plot major species\nfor j in major_idx:\n ax[1,1].plot(times,solution.values.y[:,2+j], label=gas.species_names[j])\nax[1,1].legend(loc='best')\nax[1,1].set_xlabel('Distance (m)')\nax[1,1].set_ylabel('Mass Fraction')\n\n# plot the pressure of the gas along the flow direction\nax[2,0].plot(times, solution.values.y[:,2+N], color='C2')\nax[2,0].set_xlabel('Distance (m)')\nax[2,0].set_ylabel('Pressure (Pa)')\n\n# plot the site fraction of the surface species along the flow direction \nfor i,name in enumerate(gas_Si_N_interface.species_names):\n ax[2,1].plot(times, solution.values.y[:,3+N+i], label=name)\nax[2,1].legend()\nax[2,1].set_xlabel('Distance (m)')\nax[2,1].set_ylabel('Site Fraction')\nf.tight_layout(pad=0.5)",
"_____no_output_____"
]
],
[
[
"# Case 2: Adiabatic reactor",
"_____no_output_____"
],
[
"Since the application of isothermal reactor is not prevalent, to improve the model for real use, the adiabatic reator is considered. Here, the energy balance equation is also considered.\n\nThe heat flow rate into the system has two components. One is due to the heat flux $q_e$ from the surroundings to the outer tube wall (whose surface area per unit length is $a_e$) and accumulation of enthalpy in the bulk solid. The other is due to $q_i$, the heat flux to the gas from the inner tube wall, and accumulation of enthalpy in the surface species. The expression of energy balance equation for this problem is as follows:",
"_____no_output_____"
]
],
[
[
"%%latex\n\\begin{align}\n \\rho u A_c c_p \\frac{dT}{dz} +A_c \\sum_{K_g}\\dot{\\omega}_k W_k h_k + p'\\sum_{K_g}h_k\\dot{s}_k W_k &= a_eq_e - p'\\sum^{K_b}_{bulk}\\dot{\\omega}_kh_k\\\\&=p'q_i + p'\\sum^{K_g}_{gas}\\dot{s_k}W_kh_k\n\\end{align}",
"_____no_output_____"
]
],
[
[
"Since the adiabatic reactor is considered, $q_e = 0$. Similar to the procedure for the isothermal reactor model, add the energy equation into the residual function and calculate the initial value of the spatial derivative of the temperature.",
"_____no_output_____"
]
],
[
[
"############################### initial conditions ##################################################################\n# import the SiF4 + NH3 reaction mechanism\nmech = 'data/SiF4_NH3_mec.cti'\n# import the models for gas and bulk\ngas, bulk_Si, bulk_N = ct.import_phases(mech,['gas','SiBulk','NBulk'])\n\n# import the model for gas-Si-N interface\ngas_Si_N_interface = ct.Interface(mech, 'SI3N4',[gas,bulk_Si,bulk_N])\nT0 = 1713 # K\np0 = 2 * ct.one_atm / 760.0 # Pa ~2Torr\ngas.TPX = T0, p0, \"NH3:6, SiF4:1\"\nbulk_Si.TP = T0, p0\nbulk_N.TP = T0, p0\ngas_Si_N_interface.TP = T0, p0\nD = 5.08e-2 # diameter of the tube [m]\nAc = np.pi * D**2/4 # cross section of the tube [m]\nmu = 5.7e-5 # kg/(m-s) dynamic viscosity\nperim = np.pi * D # perimeter of the tube\n# calculate the site fractions of surface species at the entrance of the tube at steady state\ngas_Si_N_interface.advance_coverages(100.0)\nZk_0 = gas_Si_N_interface.coverages\n######################################## IDA solver ###################################################################\ndef residual(z, vec, vecp, result):\n \"\"\" we create the residual equations for the problem\n vec = [u, rho, Yk, p, Zk, T]\n vecp = [dudz, drhodz, dYkdz, dpdz, dZkdz, dTdz]\n \"\"\"\n # temporary variables\n u = vec[0] # velocity\n rho = vec[1] # density\n Y = vec[2:2+N] # vector of mass fractions of all gas species\n p = vec[2+N] # pressure\n Z = vec[3+N:-1] # vector of site fractions of all surface species\n T = vec[-1] # temperature\n \n dudz = vecp[0] # velocity spatial derivative\n drhodz = vecp[1] # density spatial derivative\n dYdz = vecp[2:2+N] # mass fraction spatial derivative\n dpdz = vecp[2+N] # pressure spatial derivative\n dTdz = vecp[-1] # temperature spatial derivative\n \n h = gas.enthalpy_mass # enthalpy of gas species per mass\n h_Si = bulk_Si.enthalpy_mass # enthalpy of Si per mass\n h_N = bulk_N.enthalpy_mass # enthalpy of N per mass\n \n # initial conditions\n gas.set_unnormalized_mass_fractions(Y)\n gas.TP = T, p\n \n bulk_Si.TP = T, p\n bulk_N.TP = T, p\n gas_Si_N_interface.set_unnormalized_coverages(Z)\n gas_Si_N_interface.TP = T, p\n \n # temporary variables (based on the current system state)\n coverages = gas_Si_N_interface.coverages # site fraction vector\n sdot_g = gas_Si_N_interface.net_production_rates[:N] # heterogeneous production rate of gas species\n sdot_s = gas_Si_N_interface.net_production_rates[-M:] # molar production rate of surface speceis\n wdot_g = gas.net_production_rates # homogeneous production rate of gas species\n W_g = gas.molecular_weights # vector of molecular weight of gas species\n W_Si_b = bulk_Si.molecular_weights\n W_N_b = bulk_N.molecular_weights\n bdot = gas_Si_N_interface.net_production_rates[gas.n_species:gas.n_species+2] # bulk production rate\n \n # mass continuity equation\n result[0] = u*drhodz+rho*dudz-perim*np.sum(sdot_g*W_g)/Ac\n # conservation of species\n for k in range(gas.n_species):\n result[1+k] = (rho*u*Ac*dYdz[k] + Y[k]*perim*np.sum(sdot_g*W_g)\n - wdot_g[k]*W_g[k]*Ac\n - sdot_g[k]*W_g[k]*perim)\n # conservation of momentum\n result[1+gas.n_species] = 2*rho*u*dudz + np.power(u,2)*drhodz + dpdz + 32*u*mu/D**2\n\n # equation of state\n result[2+gas.n_species] = gas.density - rho\n\n # algebraic constraints\n for j in range(M):\n result[3+N+j] = sdot_s[j]\n \n # replace the constraints with the condition sum(Zk) = 1 for the largest site fraction species\n index = np.argmax(coverages)\n result[3+N+index] = np.sum(coverages) - 1\n \n # energy equation\n result[3+N+M] = 
(rho*u*Ac*gas.cp*dTdz\n + Ac*np.sum(wdot_g*W_g*h)\n + perim*np.sum(h*sdot_g*W_g)\n + perim*(bdot[0]*W_Si_b*h_Si + bdot[1]*W_N_b*h_N))",
"_____no_output_____"
],
[
"######## Solve linear system for the initial values of vecp ###########\n\"\"\"\n a = coefficient of [u', rho', Yk', P',T]\n b = RHS constant of each conservation equations\n\"\"\"\nrho0 = gas.density # initial density of the flow\nu0 = 11.53 # m/s initial velocity of the flow\nW = gas.molecular_weights\nW_avg = gas.mean_molecular_weight\nsdot = gas_Si_N_interface.net_production_rates # heterogeneous molar production rate\nwdot = gas.net_production_rates # homogeneours molar production rate\nh = gas.enthalpy_mass\nh_Si = bulk_Si.enthalpy_mass\nh_N = bulk_N.enthalpy_mass\nW_Si = bulk_Si.molecular_weights\nW_N = bulk_N.molecular_weights\n################### a #########################\na = np.zeros((4+N,4+N))\na[0,:] = np.hstack((rho0, u0, np.zeros(2+N)))\nfor i in range(N):\n a[1+i,2+i] = rho0*u0*Ac\na[1+N,:] = np.hstack((2*rho0*u0, u0**2, np.zeros(N), 1,0))\ncoef = np.zeros(N)\nfor j in range(N):\n coef[j] = gas.P/W[j]/np.power(np.sum(gas.Y/W),2)\na[2+N,:] = np.hstack((0, ct.gas_constant*T0, coef, -W_avg, 0))\na[3+N,:] = np.hstack((np.zeros(3+N), rho0*u0*Ac*gas.cp))\n################### b ###########################\nb = np.zeros(4+gas.n_species)\nb[0] = perim*np.sum(sdot[:N]*W)/Ac\nfor i in range(N):\n b[1+i] = (wdot[i]*W[i]*Ac\n + sdot[i]*W[i]*perim\n - gas.Y[i]*perim*np.sum(sdot[:N]*W))\nb[1+gas.n_species] = -32*u0*mu/D**2\nb[2+gas.n_species] = 0\nb[3+gas.n_species] = (- Ac*np.sum(wdot*W*h)\n - perim*np.sum(h*sdot[:N]*W)\n - perim*np.sum(sdot[N]*W_Si*h_Si + sdot[N+1]*W_N*h_N))\npart_vecp0 = np.linalg.solve(a,b)\n\nvecp0 = np.hstack((part_vecp0[:-1], np.zeros(M), part_vecp0[-1]))\nvec0 = np.hstack((11.53, gas.density, gas.Y, gas.P, Zk_0, T0))",
"_____no_output_____"
],
[
"solver = dae(\n 'ida',\n residual, \n atol=1e-8, # absolute tolerance for solution\n rtol=1e-8, # relative tolerance for solution\n algebraic_vars_idx=[np.arange(3+N,3+N+M,1)], \n max_steps=5000,\n one_step_compute=True,\n old_api=False\n)\n\ntime = []\nsolution = []\nstate = solver.init_step(0.0, vec0, vecp0)\nwhile state.values.t < 0.7:\n time.append(state.values.t)\n solution.append(state.values.y)\n state = solver.step(0.7)\n\ntime = np.array(time)\nsolution = np.array(solution)",
"_____no_output_____"
],
[
"f, ax = plt.subplots(4,2, figsize=(9,12), dpi=96)\n\n# plot gas velocity along the flow direction\nax[0,0].plot(time, solution[:,0], color='C0')\nax[0,0].set_xlabel('Distance (m)')\nax[0,0].set_ylabel('Velocity (m/s)')\n\n# plot gas density along the flow direction\nax[0,1].plot(time, solution[:,1], color='C1')\nax[0,1].set_xlabel('Distance (m)')\nax[0,1].set_ylabel('Density ($\\mathregular{kg/m^3}$)')\nax[0,1].ticklabel_format(axis='y', style='sci', scilimits=(-2,2)) # scientific notation\n\n# plot major and minor gas species separately\nminor_idx = []\nmajor_idx = []\nfor i,name in enumerate(gas.species_names): \n mean = np.mean(solution[:,2+i])\n if mean <= 0.01:\n minor_idx.append(i) \n else:\n major_idx.append(i)\n\n# plot minor gas species along the flow direction\nfor i in minor_idx:\n style = '-' if i < 10 else '--'\n ax[1,0].plot(time, solution[:,2+i], label=gas.species_names[i], linestyle=style)\nax[1,0].legend(fontsize=7.5, loc='best')\nax[1,0].set_xlabel('Distance (m)')\nax[1,0].set_ylabel('Mass Fraction')\nax[1,0].ticklabel_format(axis='y', style='sci', scilimits=(-2,2)) # scientific notation\n\n# plot major gas species along the flow direction\nfor j in major_idx:\n ax[1,1].plot(time, solution[:,2+j], label=gas.species_names[j])\nax[1,1].legend(fontsize=8, loc='best')\nax[1,1].set_xlabel('Distance (m)')\nax[1,1].set_ylabel('Mass Fraction')\n\n# plot the pressure of the gas along the flow direction\nax[2,0].plot(time, solution[:,2+N], color='C2')\nax[2,0].set_xlabel('Distance (m)')\nax[2,0].set_ylabel('Pressure (Pa)')\n\n# plot the site fraction of the surface species along the flow direction\nfor i,name in enumerate(gas_Si_N_interface.species_names):\n ax[2,1].plot(time, solution[:,3+N+i], label=name)\nax[2,1].legend(fontsize=8)\nax[2,1].set_xlabel('Distance (m)')\nax[2,1].set_ylabel('Site Fraction')\n\n# plot the temperature profile along the flow direction\nax[3,0].plot(time, solution[:,-1], color='C3')\nax[3,0].set_xlabel('Distance (m)')\nax[3,0].set_ylabel('Temperature (K)')\nf.tight_layout(pad=0.5)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e752c9ab6a3d6ae0777ca905e99a5406a68d028d | 146,371 | ipynb | Jupyter Notebook | Lab/L2/Lab2.ipynb | enigne/ScientificComputingBridging | 920f3c9688ae0e7d17cffce5763289864b9cac80 | [
"MIT"
] | 2 | 2021-05-04T01:15:32.000Z | 2021-11-08T15:08:27.000Z | Lab/L2/Lab2.ipynb | enigne/ScientificComputingBridging | 920f3c9688ae0e7d17cffce5763289864b9cac80 | [
"MIT"
] | null | null | null | Lab/L2/Lab2.ipynb | enigne/ScientificComputingBridging | 920f3c9688ae0e7d17cffce5763289864b9cac80 | [
"MIT"
] | null | null | null | 244.767559 | 42,727 | 0.748311 | [
[
[
"import numpy as np\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"%run ./measureErrors.py\nsaveFigure = True",
"_____no_output_____"
]
],
[
[
"# Lab Exercise 2 for SCB",
"_____no_output_____"
],
[
"## Errors",
"_____no_output_____"
],
[
"### Ex 1-3",
"_____no_output_____"
],
[
"* Try to read `absrelerror()` in `measureErrors.py` and use it for the exercises",
"_____no_output_____"
],
[
"### Ex. 4 Round-off errors",
"_____no_output_____"
]
],
[
[
"# Generate a random nxn matrix and compute A^{-1}*A which should be I analytically\ndef testErrA(n = 10):\n A = np.random.rand(n,n)\n Icomp = np.matmul(np.linalg.inv(A),A)\n Iexact = np.eye(n)\n absrelerror(Iexact, Icomp)",
"_____no_output_____"
]
],
[
[
"#### Random matrix $A$ with size $n=10$",
"_____no_output_____"
]
],
[
[
"testErrA()",
"*----------------------------------------------------------*\nThis program illustrates the absolute and relative error.\n*----------------------------------------------------------*\nAbsolute error: 3.6893631945840935e-14\nRelative error: 1.1666790810480723e-14\n"
]
],
[
[
"#### $n=100$",
"_____no_output_____"
]
],
[
[
"testErrA(100)",
"*----------------------------------------------------------*\nThis program illustrates the absolute and relative error.\n*----------------------------------------------------------*\nAbsolute error: 1.1445778429691323e-12\nRelative error: 1.1445778429691323e-13\n"
]
],
[
[
"#### $n=1000$",
"_____no_output_____"
]
],
[
[
"testErrA(1000)",
"*----------------------------------------------------------*\nThis program illustrates the absolute and relative error.\n*----------------------------------------------------------*\nAbsolute error: 6.045719583144339e-11\nRelative error: 1.911824397741983e-12\n"
]
],
[
[
"<span style=\"color:red\">**Note**:</span> The execution time changes with the size of $n$ almost linearly, but for $n=10000$, it will take much longer time.",
"_____no_output_____"
],
[
"### Ex. 5 Discretization Errors",
"_____no_output_____"
],
[
"Program that illustrate the concept discretization.\n\nReplacing continuous with discrete, i.e. represent a continuous function on a interval with a finite number of points.\n\nThe density, the number of points is determined by the choice of the discretization parameter $h$.",
"_____no_output_____"
],
[
"#### The step size\n\n**TRY** to change $h$ and see what will happen. \n\n**NOTE**: $h$ should not be too large or too small. A good range is in $[10^{-5},1]$.",
"_____no_output_____"
]
],
[
[
"h = 0.1",
"_____no_output_____"
]
],
[
[
"#### Discretize and compute the numerical derivatives.\n\nHere, the derivative `f'(x)` is computed in a finite number of points on a interval. \n",
"_____no_output_____"
]
],
[
[
"# The exact solution\nN = 400\nl = 0\nu = 2\nx = np.linspace(l, u, N)\nf_exa = np.exp(x)\n\n# check if h is too large or too small\nif h > 1 or h < 1e-5:\n h = 0.5\n\n# compute the numerical derivatives\nxh = np.linspace(l, u, int(abs(u-l)/h))\nfprimF = ForwardDiff(np.exp, xh, h);",
"_____no_output_____"
]
],
[
[
"#### Use `matplotlib` to visuallize the results. \n\nTry to check on [https://matplotlib.org/](https://matplotlib.org/) for mor features, it is really powerful!",
"_____no_output_____"
]
],
[
[
"# Plot\nfig, ax = plt.subplots(1)\nax.plot(x, f_exa, color='blue')\nax.plot(xh, fprimF, 'ro', clip_on=False)\nax.set_xlim([0,2])\nax.set_ylim([1,max(fprimF)])\nax.set_xlabel(r'$x$')\nax.set_ylabel('Derivatives')\nax.set_title('Discretization Errors')\nax.legend(['Exact Derivatives','Calculated Derivatives'])\n\nif saveFigure:\n filename = 'DiscretizationError_h' + str(h) + '.pdf'\n fig.savefig(filename, format='pdf', dpi=1000, bbox_inches='tight')\n",
"_____no_output_____"
]
],
[
[
"## Computer Arithmetic",
"_____no_output_____"
],
[
"Machine limits for floating point types use `np.finfo(float)`",
"_____no_output_____"
]
],
[
[
"print('machhine epsilon in python is: ' + str(np.finfo(float).eps))",
"machhine epsilon in python is: 2.220446049250313e-16\n"
]
],
[
[
"The overflow in python is shown by `np.finfo(float).max` and the underflow by `np.finfo(float).tiny`",
"_____no_output_____"
]
],
[
[
"print('The largest real number in python is: ' + str(np.finfo(float).max))\nprint('The smallest positive real number in python is: ' + str(np.finfo(float).tiny))",
"The largest real number in python is: 1.7976931348623157e+308\nThe smallest positive real number in python is: 2.2250738585072014e-308\n"
]
],
[
[
"Other attributes of `finfo` can be found [here](https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.finfo.html)",
"_____no_output_____"
],
[
"## Computation of the derivative",
"_____no_output_____"
],
[
"The function $f(x) = e^x$ in $x=1$ is used as test function.\n\n* forward difference: $\\displaystyle{f'(x)\\approx\\frac{f(x+h)-f(x)}{h}}$\n* central difference: $\\displaystyle{f'(x)\\approx\\frac{f(x+h)-f(x-h)}{2h}}$\n* five points difference: $\\displaystyles are $",
"_____no_output_____"
],
[
"#### Determine the range you would like to experiment with",
"_____no_output_____"
]
],
[
[
"# choose h from 0.1 to 10^-t, t>=2\nt = 15\nhx = 10**np.linspace(-1,-t, 30)",
"_____no_output_____"
]
],
[
[
"#### Compute the numerical derivatives using the three different schemes",
"_____no_output_____"
]
],
[
[
"# The exact derivative at x=1\nx0 = 1\nfprimExact = np.exp(1)\n\n# Numerical derivative using the three methods\nfprimF = ForwardDiff(np.exp, x0, hx)\nfprimC = CentralDiff(np.exp, x0, hx)\nfprim5 = FivePointsDiff(np.exp, x0, hx)\n\n# Relative error\nfelF = abs(fprimExact - fprimF)/abs(fprimExact)\nfelC = abs(fprimExact - fprimC)/abs(fprimExact)\nfel5 = abs(fprimExact - fprim5)/abs(fprimExact)",
"_____no_output_____"
]
],
[
[
"#### Visualize the results",
"_____no_output_____"
]
],
[
[
"# Plot\nfig, ax = plt.subplots(1)\nax.loglog(hx, felF)\nax.loglog(hx, felC)\nax.loglog(hx, fel5)\nax.autoscale(enable=True, axis='x', tight=True)\nax.set_xlabel(r'Step length $h$')\nax.set_ylabel('Relative error')\nax.legend(['Forward difference','Central difference', 'Five points difference'])\n\nif saveFigure:\n filename = 'NumericalDerivative.pdf'\n fig.savefig(filename, format='pdf', dpi=1000, bbox_inches='tight')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e752ca2340539d8537584b72ad6bed5b9231450b | 6,521 | ipynb | Jupyter Notebook | ml/sklearn/regression/Regression Description.ipynb | groovallstar/test2 | e02118794e44f92b18f9e394f2e2ea16ec1900fe | [
"MIT"
] | null | null | null | ml/sklearn/regression/Regression Description.ipynb | groovallstar/test2 | e02118794e44f92b18f9e394f2e2ea16ec1900fe | [
"MIT"
] | 4 | 2018-01-19T02:50:46.000Z | 2019-11-18T07:54:25.000Z | ml/sklearn/regression/Regression Description.ipynb | groovallstar/test2 | e02118794e44f92b18f9e394f2e2ea16ec1900fe | [
"MIT"
] | null | null | null | 45.601399 | 214 | 0.497163 | [
[
[
"회귀\n- 데이터 값이 평균과 같은 일정한 값으로 돌아가려는 경향을 이용한 통계학 기법\n- 여러 개의 독립변수와 한 개의 종속변수 간의 상관관계를 모델링하는 기법\n- 주어진 피처와 결정 값 데이터 기반에서 학습을 통해 최적의 회귀 계수를 찾아내는 것\n - Y는 종속변수, X는 독립 변수(feature), W는 이 독립변수의 값에 영향을 미치는 회귀 계수(Regression coefficients)\n$$ Y = W_1*X_1 + W_2*X_2+...+W_n*X_n $$\n\n회귀 유형\n- 회귀 계수의 선형/비선형 여부, 독립변수의 개수, 종속변수의 개수에 따라 나눌수 있음\n - 회귀 계수가 '선형이냐 아니냐' 에 따라 선형회귀, 비선형회귀로 나뉨 \n - 독립변수의 개수가 '한개인지 여러 개인지'에 따라 단일회귀, 다중회귀로 나뉨\n\n선형 회귀 종류\n- 일반 선형 회귀 : 예측값과 실제 값의 RSS(Residual Sum of Squares)를 최소화할 수 있또록 회귀 계수를 최적화 하며, 규제(Regularization)를 적용하지 않은 모델\n- 릿지(Ridge) : 선형 회귀에 L2 규제를 추가한 회귀 모델\n- 라쏘(Lasso) : 선형 회귀에 L1 규제를 적용한 방식\n- 엘라스틱넷(ElasticNet) : L2, L1 규제를 함께 결합한 모델\n- 로지스틱 회귀(Logistic Regression) : 분류에 사용되는 선형 모델\n\n최적의 회귀 모델을 만든다는 것은 전체 데이터의 잔차(오류값) 합이 최소가 되는 모델을 만든다는 의미. 동시에 오류 값 합이 최소가 될 수 있는 최적의 회귀 계수를 찾는 의미(절편과 기울기를 찾는 의미)",
"_____no_output_____"
],
[
"RSS(Residual Sum of Squares) 기반의 회귀 오류 측정\n- RSS : 오류 값의 제곱을 구해서 더하는 방식. 미분 등의 계산을 편리하게 하기 위해서 RSS 방식으로 오류 합을 구함 \n$$ERROR^2 = RSS$$\n$$ RSS = (1번 주택가격 - (w0 + w1 * 1번 주택크기)^2 + (2번 주택가격 - (w0 + w1 * 2번 주택크기)^2 + ...(모든 학습 데이터에 대해 RSS 수행) $$\n\nRSS의 이해\n- RSS는 이제 변수가 $W_0, W_1$인 식으로 표현할 수 있으며, 이 RSS를 최소로 하는 $W_0, W_1$, 즉 회귀 계수를 학습을 통해서 찾는 것이 핵심.\n- 회귀식의 독립변수 X, 종속변수 Y가 중심 변수가 아니라 w 변수(회귀 계수)가 중심 변수임을 인지하는 것이 매우 중요(학습 데이터로 입력되는 독립 변수와 종속 변수는 RSS에서 모두 상수로 간주함)\n- 일반적으로 RSS는 학습 데이터의 건수로 나누어서 정규화된 식으로 표현됨\n$$ RSS(w_0, w_1) = \\frac{1}{N}\\sum_{i=1}^N(y_i-(w_0+w1*x_i))^2 $$\n$$ (i는 1부터 학습 데이터의 총 건수 N까지) $$\n\nRSS : 회귀의 비용 함수(Cost function)\n- 회귀에서 이 RSS는 비용(Cost)이며, w 변수(회귀 계수)로 구성되는 RSS를 비용함수라고 함. 데이터를 계속 학습하면서 이 비용 함수가 반환하는 값(즉, 오류값)을 지속해서 감소시키고 최종적으로 더 이상 감소하지 않는 최소의 오류 값을 구하는 것. 비용 함수를 손실함수(loss function)라고도 함.\n",
"_____no_output_____"
],
[
"비용 최소화 하기 - 경사 하강법(Gradient Descent)\n- W 파라미터의 개수가 적다면 고차원 방정식으로 비용 함수가 최소가 되는 W변수값을 도출 할 수 있겠지만, W 파라미터가 많으면 고차원 방정식을 동원하더라도 해결하기 어려움. 경사 하강법은 이런 고차원 방정식에 대한 문제를 해결해 주면서 비용 함수 RSS를 최소화 하는 방법을 직관적으로 제공\n - '점진적으로' 반복적인 계산을 통해 W 파라미터 값을 업데이트하면서 오류 값이 최소가 되는 W 파라미터를 구하는 방식\n- 반복적으로 비용 함수의 반환 값, 즉 예측값과 실제 값의 차이가 작아지는 방향성을 가지고 W파라미터를 지속해서 보정해 나감\n- 최초 오류 값이 100이었다면 두 번째 오류 값은 100보다 작은 90, 세 번째는 80과 같은 방식으로 지속해서 오류를 감소시키는 방향으로 W 값을 계속 업데이트\n- 오류 값이 더 이상 작아지지 않으면 그 오류 값을 최소 비용으로 판단하고 그때의 W 값을 최적 파라미터로 반환\n- '어떻게 하면 오류가 작아지는 방향으로 W 값을 보정할 수 있을까?'\n\n미분을 통해 비용 함수의 최소값 찾기\n- 비용함수가 포물선 형태의 2차 함수라면 경사 하강법은 최초 w에서부터 미분을 적용한 뒤 이 미분 값을 계속 감소하는 방향으로 순차적으로 w를 업데이트함\n- 더 이상 미분된 1차 함수의 기울기가 감소하지 않는 지점을 비용 함수가 최소인 지점으로 간주하고 그때의 w를 반환",
"_____no_output_____"
],
[
"RSS의 편미분\n- R(w)를 미분해 미분 함수의 최소값을 구해야 하는데, R(W)는 두 개의 w 파라미터인 w0와 w1을 각각 가지고 있기 때문에 일반적인 미분을 적용할 수가 없고, w0, w1 각 변수에 편미분을 적용해야 함. R(w)를 최소화 하는 w0와 w1의 값은 각각 r(w)를 w0, w1으로 순차적으로 편미분을 수행해 얻을 수 있음. $x_i$는 feature.\n\n$$ \\frac{\\partial R(w)}{\\partial w_1} = \\frac{2}{N}\\sum_{i=1}^N-x_t*(y_i-(w_0+w_1x_i)) = -\\frac{2}{N}\\sum_{i=1}^Nx_i*(실제값_i-예측값_i) $$\n$$ \\frac{\\partial R(w)}{\\partial w_0} = \\frac{2}{N}\\sum_{i=1}^N-(y_i-(w_0+w_1x_i)) = -\\frac{2}{N}\\sum_{i=1}^N(실제값_i-예측값_i) $$\n\n- 편미분 : 다변수 함수에 대하여 그 중 하나의 변수에 주목하고 나머지 변수의 값을 고정시켜 놓고 그 변수로 미분하는 일\n - $x^2$ 미분 -> $2x$\n - $2x$ 미분 -> 2\n - 3 미분 -> 0\n - $ (A + B)^2 = A^2 + 2AB + B^2 $\n\n$$ R(w_0, w_1) = \\frac{1}{N}\\sum_{i=1}^N(y_i - (w_0+w_1x_i))^2 $$\n$$ = \\frac{1}{N}\\sum_{i=1}^N{y_i}^2 - 2y_i(w_0+w_1x_i) + (w_0+w_1x_i)^2 $$\n$$ = \\frac{1}{N}\\sum_{i=1}^N {y_i}^2 - 2y_i(w_0+w_1x_i) + {w_0}^2 + 2w_0w_1x_i + {w_1}^2{x_i}^2 $$\n\n(1) $w_1$ 으로 편미분\n$$ \\frac{\\partial R(w_0,w_1)}{\\partial w_1} = \\frac{1}{N}\\sum_{i=1}^N-2y_ix_i + 2w_0x_i + 2w_1{x_i}^2 $$\n$$ = \\frac{2}{N}\\sum_{i=1}^N-x_i*(y_i-w_0-w_1x_i) $$\n$$ = \\frac{2}{N}\\sum_{i=1}^N-x_i*(y_i-(w_0+w_1x_i)) $$\n$$ = \\frac{-2}{N}\\sum_{i=1}^Nx_i*(y_i-(w_0+w_1x_i)) $$\n$$ = \\frac{2}{N}\\sum_{i=1}^Nx_i*(실제값_i-예측값_i) $$\n\n(2) $w_0$ 으로 편미분\n$$ \\frac{\\partial R(w_0,w_1)}{\\partial w_0} = \\frac{1}{N}\\sum_{i=1}^N-2y_i+2w_0+2w_1x_i $$\n$$ = \\frac{2}{N}\\sum_{i=1}^N-(y-(w_0+w_1x_i)) $$\n$$ = \\frac{-2}{N}\\sum_{i=1}^N(실제값_i-예측값_i) $$\n\n- w1, w0의 편미분 결과값을 반복적으로 보정하면서 w1, w0 값을 업데이트하면 비용함수 R(W)가 최소가 되는 w1, w0값을 구할 수 있음. 실제로는 위 편미분 값이 너무 클 수 있기 때문에 보정계수 n을 곱하는데, 이를 '학습률'이라고 함\n - 새로운 $w_1$ = 이전 $w_1 - \\eta\\frac{2}{N}\\sum_{i=1}^Nx_i*(실제값_i-예측값_i)$\n - 새로운 $w_0$ = 이전 $w_0 - \\eta\\frac{2}{N}\\sum_{i=1}^N(실제값_i-예측값_i)$",
"_____no_output_____"
],
[
"경사 하강법 수행 프로세스\n\n- Step 1 : $w_1, w_0$를 임의의 값으로 설정하고 첫 비용 함수의 값을 계산함\n- Step 2 : $w_1$을 $w_1 - \\eta\\frac{2}{N}\\sum_{i=1}^Nx_i*(실제값_i-예측값_i)$, $w_0$을 $w_0 - \\eta\\frac{2}{N}\\sum_{i=1}^N(실제값_i-예측값_i)$으로 업데이트 한 후 다시 비용 함수의 값을 계산함\n- Step 3 : 비용 함수의 값이 감소했으면 다시 Step 2를 반복함. 더 이상 비용 함수의 값이 감소하지 않으면 그때의 $w_1,w_0$를 구하고 반복을 중지함",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e752d1a766fe18c1603f1042e73c6d0c891fe772 | 9,246 | ipynb | Jupyter Notebook | tripaware_2017/Cleared Outputs Notebooks/Expanded Trips.ipynb | shankari/e-mission-eval | ac678d9f4a3e53597ebc569e4f68140645b38ca3 | [
"BSD-3-Clause"
] | null | null | null | tripaware_2017/Cleared Outputs Notebooks/Expanded Trips.ipynb | shankari/e-mission-eval | ac678d9f4a3e53597ebc569e4f68140645b38ca3 | [
"BSD-3-Clause"
] | 13 | 2020-06-27T03:41:07.000Z | 2021-08-13T17:15:36.000Z | tripaware_2017/Cleared Outputs Notebooks/Expanded Trips.ipynb | corinne-hcr/e-mission-eval-private-data | 3825bbcd36b431d0458bb9d0c6c671043861a32e | [
"BSD-3-Clause"
] | 4 | 2017-07-30T15:53:00.000Z | 2018-07-03T06:01:20.000Z | 30.117264 | 175 | 0.552239 | [
[
[
"import stats_functions as sf\nimport emission.storage.timeseries.aggregate_timeseries as estag\nimport emission.storage.timeseries.timequery as estt\nimport arrow\nimport emission.core.get_database as edb\nfrom emission.core.wrapper.user import User\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"agts = estag.AggregateTimeSeries()\nsep_dec_tq_data_ts = estt.TimeQuery(\"data.ts\", arrow.get('2018-03-28', 'YYYY-MM-DD').timestamp, arrow.get('2018-06-06', 'YYYY-MM-DD').timestamp)\nclient_nav_events_df = agts.get_data_df(\"stats/client_nav_event\", time_query=sep_dec_tq_data_ts)\nclient_nav_events_df.head()",
"_____no_output_____"
],
[
"expanded_trip_events = client_nav_events_df[client_nav_events_df.name == \"expanded_trip\"]",
"_____no_output_____"
],
[
"information = []\nemotion = []\ncontrol = []\ninformation_uuids = set()\nemotion_uuids = set()\ncontrol_uuids = set()\nno_clients = set()\nfor i in range(len(expanded_trip_events)):\n uuid = expanded_trip_events[i]['user_id']\n try:\n client = edb.get_profile_db().find_one({\"user_id\": uuid})['client']\n if client == 'urap-2017-information':\n information.append(expanded_trip_events[i])\n information_uuids.add(uuid)\n elif client == 'urap-2017-emotion':\n emotion.append(expanded_trip_events[i])\n emotion_uuids.add(uuid)\n elif client == 'urap-2017-control':\n control.append(expanded_trip_events[i])\n control_uuids.add(uuid)\n except:\n no_clients.add(str(uuid))\nfor elem in no_clients:\n print(elem + \" doesnt have a client for some reason\")\n\nprint(str(len(expanded_trip_events)) + \" events\")",
"_____no_output_____"
],
[
"emotion_num_users = len(emotion_uuids)\ninformation_num_users = len(information_uuids)\ncontrol_num_users = len(control_uuids)\nemotion_total_expanded_trips = len(emotion)\ninformation_total_expanded_trips = len(information)\ncontrol_total_expanded_trips = len(control)\nprint(emotion_num_users, information_num_users, control_num_users)",
"_____no_output_____"
],
[
"objects = ('Emotion', 'Information', 'Control')\ny_pos = range(len(objects))\nperformance = [emotion_total_expanded_trips, information_total_expanded_trips, control_total_expanded_trips]\n\n# Total number of expanded trips per group\nplt.bar(y_pos, performance, align='center', alpha=0.5)\nplt.xticks(y_pos, objects)\nplt.ylabel('Expanded Trips Per Group')\nplt.title('Number of Expanded Trips')\n\nplt.show()",
"_____no_output_____"
],
[
"# Average expanded trips per person\nperformance = [emotion_total_expanded_trips/emotion_num_users, information_total_expanded_trips/information_num_users, control_total_expanded_trips/control_num_users]\n\nplt.bar(y_pos, performance, align='center', alpha=0.5)\nplt.xticks(y_pos, objects)\nplt.ylabel('Average Expanded Trips')\nplt.title('Average Expanded Trips Per Person')\n \nplt.show()",
"_____no_output_____"
]
],
[
[
"# Permutation Tests",
"_____no_output_____"
]
],
[
[
"users = edb.get_uuid_db().find()",
"_____no_output_____"
],
[
"import pandas as pd\nfrom scipy import stats\nimport emission.storage.timeseries.abstract_timeseries as esta\nfrom datetime import timedelta, date, tzinfo, datetime\nimport numpy as np\n\n# Create a dataframe with columns user_id, number of diary checks, week number, and group.\ndf = pd.DataFrame()\ninformation_count = 0\nemotion_count = 0\ncontrol_count = 0\nfor i in range(len(users)):\n user_id = users[i]\n start = arrow.get('2018-03-28', 'YYYY-MM-DD')\n end = arrow.get('2018-06-06', 'YYYY-MM-DD')\n vals = []\n week_val = -1\n for week in arrow.Arrow.range('week', start, end):\n ts = esta.TimeSeries.get_time_series(user_id)\n begin_ts = week.timestamp\n end_ts = (week + timedelta(weeks=1)).timestamp\n last_period_tq = estt.TimeQuery(\"data.start_ts\", begin_ts, end_ts)\n cs_df = ts.get_data_df(\"analysis/inferred_section\", time_query=last_period_tq)\n total = 0\n if cs_df.shape[0] <= 0:\n continue\n try:\n for event in expanded_trip_events:\n if event['user_id'] == user_id:\n if event['ts'] > begin_ts and event['ts'] <= end_ts:\n total += 1\n except:\n continue\n vals.append(total)\n #Always use lists only where the number of datapoints is greater than 2 otherwise we get a perfect correlation\n weeks = np.arange(len(vals))\n if len(weeks) > 1:\n group = \"none\"\n try:\n client = edb.get_profile_db().find_one({\"user_id\": user_id})['client']\n if client == 'urap-2017-information':\n group = \"information\"\n information_count += 1\n elif client == 'urap-2017-emotion':\n group = \"emotion\"\n emotion_count += 1\n elif client == 'urap-2017-control':\n group = \"control\"\n control_count += 1\n except:\n continue\n df = df.append({'uuid': user_id, 'group': group, 'total': sum(vals)}, ignore_index=True)",
"_____no_output_____"
],
[
"df.groupby('group').mean()",
"_____no_output_____"
],
[
"e_c = df[df['group'] != 'information']\nsf.perm_test(e_c['group'], e_c['total'], sf.mean_diff, 100000)",
"_____no_output_____"
],
[
"i_c = df[df['group'] != 'emotion']\nsf.perm_test(i_c['group'], i_c['total'], sf.mean_diff, 100000)",
"_____no_output_____"
],
[
"i_e = df[df['group'] != 'control']\nsf.perm_test(i_e['group'], i_e['total'], sf.mean_diff, 100000)",
"_____no_output_____"
]
],
[
[
"# Bootstrapping Tests",
"_____no_output_____"
]
],
[
[
"e_c = df[df['group'] != 'information']\nsf.bootstrap_test(e_c['group'], e_c['total'], sf.mean_diff, 100000)",
"_____no_output_____"
]
],
[
[
"# Mann Whitney U Tests",
"_____no_output_____"
]
],
[
[
"from scipy.stats import mannwhitneyu\n\ncontrol = df[df['group'] == 'control']\ncontrol_array = control.as_matrix(columns=control.columns[1:2])\n\nemotion = df[df['group'] == 'emotion']\nemotion_array = emotion.as_matrix(columns=emotion.columns[1:2])\n\nprint(mannwhitneyu(emotion_array, control_array))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e752da841e482f94378f3e3f21d0866014ec779a | 3,587 | ipynb | Jupyter Notebook | perceptron_implementation.ipynb | FallinLoveMan/Depom | b0096bb3196701a805ce999c1f636f774f4e8385 | [
"MIT"
] | 4 | 2019-12-24T19:51:07.000Z | 2020-02-05T09:11:28.000Z | perceptron_implementation.ipynb | yitopeligo/perceptron-training | b0096bb3196701a805ce999c1f636f774f4e8385 | [
"MIT"
] | null | null | null | perceptron_implementation.ipynb | yitopeligo/perceptron-training | b0096bb3196701a805ce999c1f636f774f4e8385 | [
"MIT"
] | 4 | 2021-04-27T21:08:08.000Z | 2021-09-14T15:02:28.000Z | 26.969925 | 441 | 0.456928 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"class Perceptron(object):\n\n def __init__(self, eta=1, epochs=100):\n self.eta = eta\n self.epochs = epochs\n\n def train(self, X, y):\n\n self.w_ = np.zeros(X.shape[1])\n self.errors_ = []\n\n for epoch in range(int(self.epochs) + 1):\n errors = 0\n batch_errors = []\n\n for xi, target in zip(X, y): \n pred_error = (target - self.predict(xi))\n batch_errors.append(pred_error)\n \n if pred_error != 0:\n errors = (errors + 1)\n \n for i in range(X.shape[1]):\n self.w_[i] += self.eta * np.dot(X[:,i], batch_errors)\n \n self.errors_.append(errors)\n return self \n\n def net_input(self, X):\n return np.dot(X, self.w_)\n\n def predict(self, X):\n return np.where(self.net_input(X) > 0.0, 1, 0)",
"_____no_output_____"
],
[
"df = pd.read_csv(\"data/Example.tsv\", sep='\\t', header=None)\n\ndf.head()\n\ny = df.iloc[:, 0].values\ny = np.where(y == 'A', 1.0, 0.0) #1 for label A, 0 for B\nX_wo = df.iloc[:, [1,2]].values #Features without ones vector\nones = np.ones((X_wo.shape[0]))\n\nX = np.ones((X_wo.shape[0],X_wo.shape[1]+1) )#All ones in shape of m, n+1 (+1 for incoming \"ones\" column)\nX[:,1:] = X_wo #Input Vector",
"_____no_output_____"
],
[
"ppn = Perceptron(epochs=100.0, eta=1.0)\n\nppn.train(X,y)\n\nprint(ppn.w_)\nprint(ppn.errors_)",
"[288. -0.608382 248.160772]\n[200, 152, 94, 107, 89, 268, 95, 91, 87, 88, 89, 87, 92, 111, 94, 113, 88, 240, 93, 89, 90, 89, 94, 89, 98, 233, 94, 88, 88, 88, 90, 86, 92, 100, 293, 96, 90, 87, 86, 93, 91, 90, 90, 92, 212, 95, 93, 83, 86, 90, 96, 108, 97, 149, 96, 132, 93, 175, 99, 87, 91, 114, 97, 146, 93, 129, 92, 173, 99, 87, 90, 107, 97, 167, 93, 92, 97, 233, 96, 90, 84, 89, 90, 89, 102, 86, 269, 95, 91, 85, 88, 90, 88, 93, 107, 93, 125, 93, 206, 97, 90]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e752dc47c7ccd52d7351333cd06cecdacabadb7e | 314,202 | ipynb | Jupyter Notebook | Document/Document.ipynb | GeetDsa/pycm | 2665124b95abe18cec0729deaefe99e2e916cbeb | [
"MIT"
] | null | null | null | Document/Document.ipynb | GeetDsa/pycm | 2665124b95abe18cec0729deaefe99e2e916cbeb | [
"MIT"
] | null | null | null | Document/Document.ipynb | GeetDsa/pycm | 2665124b95abe18cec0729deaefe99e2e916cbeb | [
"MIT"
] | null | null | null | 27.336175 | 862 | 0.484835 | [
[
[
"<p style=\"z-index: 101;background: #fde073;text-align: center;line-height: 2.5;overflow: hidden;font-size:22px;\">Please <a href=\"#Cite\">cite us</a> if you use the software</p>",
"_____no_output_____"
],
[
"# PyCM Document",
"_____no_output_____"
],
[
"### Version : 2.6\n-----",
"_____no_output_____"
],
[
"## Table of contents",
"_____no_output_____"
],
[
"<ul>\n <li><a href=\"#Overview\">Overview</a></li>\n <li><a href=\"#Installation\">Installation</a></li>\n <ol>\n <li><a href=\"#Source-code\">Source Code</a></li>\n <li><a href=\"#PyPI\">PyPI</a></li>\n <li><a href=\"#Easy-install\">Easy Install</a></li>\n <li><a href=\"#Docker\">Docker</a></li>\n </ol>\n \n <li><a href=\"#Usage\">Usage</a></li>\n <ol>\n <li><a href=\"#From-vector\">From Vector</a></li>\n <li><a href=\"#Direct-CM\">Direct CM</a></li>\n <li><a href=\"#Activation-threshold\">Activation Threshold</a></li>\n <li><a href=\"#Load-from-file\">Load From File</a></li>\n <li><a href=\"#Sample-weights\">Sample Weights</a></li>\n <li><a href=\"#Transpose\">Transpose</a></li>\n <li><a href=\"#Relabel\">Relabel</a></li>\n <li><a href=\"#Online-help\">Online Help</a></li>\n <li><a href=\"#Parameter-recommender\">Parameter Recommender</a></li>\n <li><a href=\"#Comapre\">Comapre</a></li>\n <li><a href=\"#Acceptable-data-types\">Acceptable Data Types</a></li>\n </ol>\n \n <li><a href=\"#Basic-parameters\">Basic Parameters</a></li>\n <ol>\n <li><a href=\"#TP-(True-positive)\">True Positive</a></li>\n <li><a href=\"#TN-(True-negative)\">True Negative</a></li>\n <li><a href=\"#FP-(False-positive)\">False Positive</a></li>\n <li><a href=\"#FN-(False-negative)\">False Negative</a></li>\n <li><a href=\"#P-(Condition-positive)\">Condition Positive</a></li>\n <li><a href=\"#N-(Condition-negative)\">Condition Negative</a></li>\n <li><a href=\"#TOP-(Test-outcome-positive)\">Test Outcome Positive</a></li>\n <li><a href=\"#TON-(Test-outcome-negative)\">Test Outcome Negative</a></li>\n <li><a href=\"#POP-(Population)\">Population</a></li>\n </ol>\n \n <li><a href=\"#Class-statistics\">Class Statistics</a></li>\n <ol>\n <li><a href=\"#TPR-(True-positive-rate)\">True Positive Rate</a></li>\n <li><a href=\"#TNR-(True-negative-rate)\">True Negative Rate</a></li>\n <li><a href=\"#PPV-(Positive-predictive-value)\">Positive Predictive Value</a></li>\n <li><a href=\"#NPV-(Negative-predictive-value)\">Negative Predictive Value</a></li>\n <li><a href=\"#FNR-(False-negative-rate)\">False Negative Rate</a></li>\n <li><a href=\"#FPR-(False-positive-rate)\">False Positive Rate</a></li>\n <li><a href=\"#FDR-(False-discovery-rate)\">False Discovery Rate</a></li>\n <li><a href=\"#FOR-(False-omission-rate)\">False Omission Rate</a></li>\n <li><a href=\"#ACC-(Accuracy)\">Accuracy</a></li>\n <li><a href=\"#ERR-(Error-rate)\">Error Rate</a></li>\n <li><a href=\"#FBeta-Score\">FBeta Score</a></li>\n <li><a href=\"#MCC-(Matthews-correlation-coefficient)\">Matthews Correlation Coefficient</a></li>\n <li><a href=\"#BM-(Bookmaker-informedness)\">Informedness</a></li>\n <li><a href=\"#MK-(Markedness)\">Markedness</a></li>\n <li><a href=\"#PLR-(Positive-likelihood-ratio)\">Positive Likelihood Ratio</a></li>\n <li><a href=\"#NLR-(Negative-likelihood-ratio)\">Negative Likelihood Ratio</a></li>\n <li><a href=\"#DOR-(Diagnostic-odds-ratio)\">Diagnostic Odds Ratio</a></li>\n <li><a href=\"#PRE-(Prevalence)\">Prevalence</a></li>\n <li><a href=\"#G-(G-measure)\">G-Measure</a></li>\n <li><a href=\"#RACC-(Random-accuracy)\">Random Accuracy</a></li>\n <li><a href=\"#RACCU-(Random-accuracy-unbiased)\">Random Accuracy Unbiased</a></li>\n <li><a href=\"#J-(Jaccard-index)\">Jaccard Index</a></li>\n <li><a href=\"#IS-(Information-score)\">Information Score</a></li>\n <li><a href=\"#CEN-(Confusion-entropy)\">Confusion Entropy</a></li>\n <li><a href=\"#MCEN-(Modified-confusion-entropy)\">Modified Confusion Entropy</a></li>\n <li><a 
href=\"#AUC-(Area-under-the-ROC-curve)\">Area Under The ROC Curve</a></li>\n <li><a href=\"#dInd-(Distance-index)\">Distance Index</a></li>\n <li><a href=\"#sInd-(Similarity-index)\">Similarity Index</a></li>\n <li><a href=\"#DP-(Discriminant-power)\">Discriminant Power</a></li>\n <li><a href=\"#Y-(Youden-index)\">Youden Index</a></li>\n <li><a href=\"#PLRI-(Positive-likelihood-ratio-interpretation)\">Positive Likelihood Ratio Interpretation</a></li>\n <li><a href=\"#NLRI-(Negative-likelihood-ratio-interpretation)\">Negative Likelihood Ratio Interpretation</a></li>\n <li><a href=\"#DPI-(Discriminant-power-interpretation)\">Discriminant Power Interpretation</a></li>\n <li><a href=\"#AUCI-(AUC-value-interpretation)\">AUC Value Interpretation</a></li>\n <li><a href=\"#MCCI-(Matthews-correlation-coefficient-interpretation)\">Matthews Correlation Coefficient Interpretation</a></li>\n <li><a href=\"#QI-(Yule's-Q-interpretation)\">Yule's Q Interpretation</a></li>\n <li><a href=\"#GI-(Gini-index)\">Gini Index</a></li>\n <li><a href=\"#LS-(Lift-score)\">Lift Score</a></li>\n <li><a href=\"#AM-(Automatic/Manual)\">Automatic/Manual</a></li>\n <li><a href=\"#BCD-(Bray-Curtis-dissimilarity)\">Bray-Curtis Dissimilarity</a></li>\n <li><a href=\"#OP-(Optimized-precision)\">Optimized Precision</a></li>\n <li><a href=\"#IBA-(Index-of-balanced-accuracy)\">Index of Balanced Accuracy</a></li>\n <li><a href=\"#GM-(G-mean)\">G-Mean</a></li>\n <li><a href=\"#Q-(Yule's-Q)\">Yule's Q</a></li>\n <li><a href=\"#AGM-(Adjusted-G-mean)\">Adjusted G-Mean</a></li> \n <li><a href=\"#AGF-(Adjusted-F-score)\">Adjusted F-Score</a></li>\n <li><a href=\"#OC-(Overlap-coefficient)\">Overlap Coefficient</a></li>\n <li><a href=\"#OOC-(Otsuka-Ochiai-coefficient)\">Otsuka Ochiai Coefficient</a></li>\n <li><a href=\"#TI-(Tversky-index)\">Tversky Index</a></li> \n <li><a href=\"#AUPR-(Area-under-the-PR-curve)\">Area Under The PR Curve</a></li> \n <li><a href=\"#ICSI-(Individual-classification-success-index)\">Individual Classification Success Index</a></li> \n <li><a href=\"#CI-(Confidence-interval)\">Confidence Interval</a></li> \n <li><a href=\"#NB-(Net-benefit)\">Net Benefit</a></li>\n </ol>\n \n <li><a href=\"#Overall-statistics\">Overall Statistics</a></li>\n <ol>\n <li><a href=\"#Kappa\">Kappa</a></li>\n <li><a href=\"#Kappa-unbiased\">Kappa Unbiased</a></li>\n <li><a href=\"#Kappa-no-prevalence\">Kappa No Prevalence</a></li>\n <li><a href=\"#Kappa-standard-error\">Kappa Standard Error</a></li>\n <li><a href=\"#Kappa-95%-CI\">Kappa 95% CI</a></li>\n <li><a href=\"#Chi-squared\">Chi Squared</a></li>\n <li><a href=\"#Chi-squared-DF\">Chi Squared DF</a></li>\n <li><a href=\"#Phi-squared\">Phi Squared</a></li>\n <li><a href=\"#Cramer's-V\">Cramer's V</a></li>\n <li><a href=\"#Standard-error\">Standard Error</a></li>\n <li><a href=\"#95%-CI\">95% CI</a></li>\n <li><a href=\"#Bennett's-S\">Bennett's S</a></li>\n <li><a href=\"#Scott's-Pi\">Scott's PI</a></li>\n <li><a href=\"#Gwet's-AC1\">Gwet's AC1</a></li>\n <li><a href=\"#Reference-entropy\">Reference Entropy</a></li>\n <li><a href=\"#Response-entropy\">Response Entropy</a></li>\n <li><a href=\"#Cross-entropy\">Cross Entropy</a></li>\n <li><a href=\"#Joint-entropy\">Joint Entropy</a></li>\n <li><a href=\"#Conditional-entropy\">Conditional Entropy</a></li>\n <li><a href=\"#Kullback-Leibler-divergence\">Kullback-Leibler Divergence</a></li>\n <li><a href=\"#Mutual-information\">Mutual Information</a></li>\n <li><a href=\"#Goodman-&-Kruskal's-lambda-A\">Goodman-Kruskal's Lambda 
A</a></li>\n <li><a href=\"#Goodman-&-Kruskal's-lambda-B\">Goodman-Kruskal's Lambda B</a></li>\n <li><a href=\"#SOA1-(Landis-&-Koch's-benchmark)\">Landis-Koch's Benchmark</a></li>\n <li><a href=\"#SOA2-(Fleiss'-benchmark)\">Fleiss' Benchmark</a></li>\n <li><a href=\"#SOA3-(Altman's-benchmark)\">Altman's Benchmark</a></li>\n <li><a href=\"#SOA4-(Cicchetti's-benchmark)\">Cicchetti's Benchmark</a></li>\n <li><a href=\"#SOA5-(Cramer's-benchmark)\">Cramer's Benchmark</a></li>\n <li><a href=\"#SOA6-(Matthews's-benchmark)\">Matthews's Benchmark</a></li>\n <li><a href=\"#Overall_ACC\">Overall Accuracy</a></li>\n <li><a href=\"#Overall_RACC\">Overall Random Accuracy</a></li>\n <li><a href=\"#Overall_RACCU\">Overall Random Accuracy Unbiased</a></li>\n <li><a href=\"#PPV_Micro\">Positive Predictive Value Micro</a></li>\n <li><a href=\"#TPR_Micro\">True Positive Rate Micro</a></li>\n <li><a href=\"#TNR_Micro\">True Negative Rate Micro</a></li>\n <li><a href=\"#FPR_Micro\">False Positive Rate Micro</a></li>\n <li><a href=\"#FNR_Micro\">False Negative Rate Micro</a></li>\n <li><a href=\"#F1_Micro\">F1 Score Micro</a></li>\n <li><a href=\"#PPV_Macro\">Positive Predictive Value Macro</a></li>\n <li><a href=\"#TPR_Macro\">True Positive Rate Macro</a></li>\n <li><a href=\"#TNR_Macro\">True Negative Rate Macro</a></li>\n <li><a href=\"#FPR_Macro\">False Positive Rate Macro</a></li>\n <li><a href=\"#FNR_Macro\">False Negative Rate Macro</a></li>\n <li><a href=\"#F1_Macro\">F1 Score Macro</a></li>\n <li><a href=\"#ACC_Macro\">Accuracy Macro</a></li>\n <li><a href=\"#Overall_J\">Overall Jaccard Index</a></li>\n <li><a href=\"#Hamming-loss\">Hamming Loss</a></li>\n <li><a href=\"#Zero-one-loss\">Zero-one Loss</a></li>\n <li><a href=\"#NIR-(No-information-rate)\">No Information Rate</a></li>\n <li><a href=\"#P-Value\">P Value</a></li>\n <li><a href=\"#Overall_CEN\">Overall Confusion Entropy</a></li>\n <li><a href=\"#Overall_MCEN\">Overall Modified Confusion Entropy</a></li>\n <li><a href=\"#Overall_MCC\">Overall Matthews Correlation Coefficient</a></li>\n <li><a href=\"#RR-(Global-performance-index)\">Global Performance Index</a></li>\n <li><a href=\"#CBA-(Class-balance-accuracy)\">Class Balance Accuracy</a></li>\n <li><a href=\"#AUNU\">AUNU</a></li>\n <li><a href=\"#AUNP\">AUNP</a></li>\n <li><a href=\"#RCI-(Relative-classifier-information)\">Relative Classifier Information</a></li>\n <li><a href=\"#Pearson's-C\">Pearson's C</a></li>\n <li><a href=\"#CSI-(Classification-success-index)\">Classification Success Index</a></li>\n <li><a href=\"#ARI-(Adjusted-Rand-index)\">Adjusted Rand Index</a></li>\n </ol>\n \n <li><a href=\"#Print\">Print</a></li>\n <ol>\n <li><a href=\"#Full\">Full</a></li>\n <li><a href=\"#Matrix\">Matrix</a></li>\n <li><a href=\"#Normalized-matrix\">Normalized Matrix</a></li>\n <li><a href=\"#Stat\">Stat</a></li>\n <li><a href=\"#Compare-report\">Compare Report</a></li>\n </ol>\n \n <li><a href=\"#Save\">Save</a></li>\n <ol>\n <li><a href=\"#.pycm-file\">pycm</a></li>\n <li><a href=\"#HTML\">HTML</a></li>\n <li><a href=\"#CSV\">CSV</a></li>\n <li><a href=\"#OBJ\">object</a></li>\n <li><a href=\"#comp\">comp</a></li>\n </ol>\n \n <li><a href=\"#Input-errors\">Input Errors</a></li>\n <li><a href=\"#Examples\">Examples</a></li>\n <li><a href=\"#Cite\">Cite</a></li>\n <li><a href=\"#References\">References</a></li>",
"_____no_output_____"
],
[
"## Overview",
"_____no_output_____"
],
[
"<p style=\"text-align:justify;\">\nPyCM is a multi-class confusion matrix library written in Python that supports both input data vectors and direct matrix, and a proper tool for post-classification model evaluation that supports most classes and overall statistics parameters.\t\nPyCM is the swiss-army knife of confusion matrices, targeted mainly at data scientists that need a broad array of metrics for predictive models and accurate evaluation of a large variety of classifiers.\n</p>",
"_____no_output_____"
],
[
"<div style=\"text-align:center;\">\n <img src=\"../Otherfiles/block_diagram.jpg\">\n</div>\n<center><p style=\"text-align:center;\">Fig1. ConfusionMatrix Block Diagram</p></center>\n",
"_____no_output_____"
],
[
"## Installation\t",
"_____no_output_____"
],
[
"⚠️ PyCM 2.4 is the last version to support **Python 2.7** & **Python 3.4**",
"_____no_output_____"
],
[
"### Source code\n- Download [Version 2.6](https://github.com/sepandhaghighi/pycm/archive/v2.6.zip) or [Latest Source ](https://github.com/sepandhaghighi/pycm/archive/dev.zip)\n- Run `pip install -r requirements.txt` or `pip3 install -r requirements.txt` (Need root access)\n- Run `python3 setup.py install` or `python setup.py install` (Need root access)",
"_____no_output_____"
],
[
"### PyPI\n\n\n- Check [Python Packaging User Guide](https://packaging.python.org/installing/) \n- Run `pip install pycm==2.6` or `pip3 install pycm==2.6` (Need root access)",
"_____no_output_____"
],
[
"### Conda\n\n- Check [Conda Managing Package](https://conda.io/docs/user-guide/tasks/manage-pkgs.html#installing-packages-from-anaconda-org)\n- `conda install -c sepandhaghighi pycm` (Need root access)",
"_____no_output_____"
],
[
"### Easy install\n\n- Run `easy_install --upgrade pycm` (Need root access)",
"_____no_output_____"
],
[
"### Docker\t\n\n- Run `docker pull sepandhaghighi/pycm` (Need root access)\n- Configuration :\n\t- Ubuntu 16.04\n\t- Python 3.6",
"_____no_output_____"
],
[
"## Usage",
"_____no_output_____"
],
[
"### From vector",
"_____no_output_____"
]
],
[
[
"from pycm import *",
"_____no_output_____"
],
[
"y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]\ny_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]",
"_____no_output_____"
],
[
"cm = ConfusionMatrix(y_actu, y_pred,digit=5)",
"_____no_output_____"
]
],
[
[
"<ul>\n <li> <span style=\"color:red;\">Notice </span> : `digit` (the number of digits to the right of the decimal point in a number) is new in <span style=\"color:red;\">version 0.6</span> (default value : 5)</li>\n <li>Only for print and save</li>\n</ul>\n",
"_____no_output_____"
]
],
[
[
"cm",
"_____no_output_____"
],
[
"cm.actual_vector",
"_____no_output_____"
],
[
"cm.predict_vector",
"_____no_output_____"
],
[
"cm.classes",
"_____no_output_____"
],
[
"cm.class_stat",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `cm.statistic_result` prev versions (0.2 >)</li>\n</ul>",
"_____no_output_____"
]
],
[
[
"cm.overall_stat",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `_` removed from overall statistics names in <span style=\"color:red;\">version 1.6</span> </li>\n</ul>",
"_____no_output_____"
]
],
[
[
"cm.table",
"_____no_output_____"
],
[
"cm.matrix",
"_____no_output_____"
],
[
"cm.normalized_matrix",
"_____no_output_____"
],
[
"cm.normalized_table",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `matrix`, `normalized_matrix` & `normalized_table` added in <span style=\"color:red;\">version 1.5</span> (changed from print style)</li>\n</ul>",
"_____no_output_____"
]
],
[
[
"import numpy",
"_____no_output_____"
],
[
"y_actu = numpy.array([2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2])\ny_pred = numpy.array([0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2])",
"_____no_output_____"
],
[
"cm = ConfusionMatrix(y_actu, y_pred,digit=5)",
"_____no_output_____"
],
[
"cm",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `numpy.array` support in <span style=\"color:red;\">versions > 0.7</span></li>\n</ul> ",
"_____no_output_____"
],
[
"### Direct CM",
"_____no_output_____"
]
],
[
[
"cm2 = ConfusionMatrix(matrix={0: {0: 3, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 2}, 2: {0: 2, 1: 1, 2: 3}},digit=5)",
"_____no_output_____"
],
[
"cm2",
"_____no_output_____"
],
[
"cm2.actual_vector",
"_____no_output_____"
],
[
"cm2.predict_vector",
"_____no_output_____"
],
[
"cm2.classes",
"_____no_output_____"
],
[
"cm2.class_stat",
"_____no_output_____"
],
[
"cm2.overall_stat",
"_____no_output_____"
]
],
[
[
"\n<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n <li>In direct matrix mode `actual_vector` and `predict_vector` are empty</li>\n</ul> ",
"_____no_output_____"
],
[
"### Activation threshold",
"_____no_output_____"
],
[
"`threshold` is added in `version 0.9` for real value prediction.\t\t\t\n\t\t\t\t\t\t\nFor more information visit <a href=\"#Example-3-(Activation-threshold)\">Example 3</a>",
"_____no_output_____"
],
[
"\n<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.9</span> </li>\n</ul>\n",
"_____no_output_____"
],
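[
 "A minimal sketch of the assumed call pattern (the score values and the 0.5 cut-off below are illustrative, not taken from the original examples):\n\n```python\n\n>>> y_actu = [1, 1, 0, 0, 1, 1]\n>>> y_score = [0.9, 0.7, 0.4, 0.3, 0.6, 0.2]\n>>> cm = ConfusionMatrix(y_actu, y_score, threshold=lambda x: 1 if x >= 0.5 else 0)\n\n```",
 "_____no_output_____"
],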
[
"### Load from file",
"_____no_output_____"
],
[
"`file` is added in `version 0.9.5` in order to load saved confusion matrix with `.obj` format generated by `save_obj` method.\n\nFor more information visit <a href=\"#Example-4-(File)\">Example 4</a>\n",
"_____no_output_____"
],
[
"\n<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.9.5</span> </li>\n</ul>",
"_____no_output_____"
],
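[
 "A minimal sketch of the assumed round trip (the file name `cm1` is illustrative):\n\n```python\n\n>>> cm.save_obj(\"cm1\")\n>>> cm_load = ConfusionMatrix(file=open(\"cm1.obj\", \"r\"))\n\n```",
 "_____no_output_____"
],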
[
"### Sample weights",
"_____no_output_____"
],
[
"`sample_weight` is added in `version 1.2`\n\nFor more information visit <a href=\"#Example-5-(Sample-weights)\">Example 5</a>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.2</span> </li>\n</ul>",
"_____no_output_____"
],
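[
 "A minimal sketch (the weight values are illustrative; the list length must match the input vectors):\n\n```python\n\n>>> weights = [2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2]\n>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=weights)\n\n```",
 "_____no_output_____"
],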
[
"### Transpose",
"_____no_output_____"
],
[
"`transpose` is added in `version 1.2` in order to transpose input matrix (only in `Direct CM` mode)",
"_____no_output_____"
]
],
[
[
"cm = ConfusionMatrix(matrix={0: {0: 3, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 2}, 2: {0: 2, 1: 1, 2: 3}},digit=5,transpose=True)",
"_____no_output_____"
],
[
"cm.print_matrix()",
"Predict 0 1 2 \nActual\n0 3 0 2 \n\n1 0 1 1 \n\n2 0 2 3 \n\n\n"
]
],
[
[
"\n<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Relabel",
"_____no_output_____"
],
[
"`relabel` method is added in `version 1.5` in order to change ConfusionMatrix class names.",
"_____no_output_____"
]
],
[
[
"cm.relabel(mapping={0:\"L1\",1:\"L2\",2:\"L3\"})",
"_____no_output_____"
],
[
"cm",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Online help",
"_____no_output_____"
],
[
"`online_help` function is added in `version 1.1` in order to open each statistics definition in web browser.",
"_____no_output_____"
],
[
"```python\n\n>>> from pycm import online_help\n>>> online_help(\"J\")\n>>> online_help(\"J\", alt_link=True)\n>>> online_help(\"SOA1(Landis & Koch)\")\n>>> online_help(2)\n\n```",
"_____no_output_____"
],
[
"* List of items are available by calling `online_help()` (without argument)",
"_____no_output_____"
],
[
"* If PyCM website is not available, set `alt_link = True`",
"_____no_output_____"
]
],
[
[
"online_help()",
"Please choose one parameter : \n\nExample : online_help(\"J\") or online_help(2)\n\n1-95% CI\n2-ACC\n3-ACC Macro\n4-AGF\n5-AGM\n6-AM\n7-ARI\n8-AUC\n9-AUCI\n10-AUNP\n11-AUNU\n12-AUPR\n13-BCD\n14-BM\n15-Bennett S\n16-CBA\n17-CEN\n18-CSI\n19-Chi-Squared\n20-Chi-Squared DF\n21-Conditional Entropy\n22-Cramer V\n23-Cross Entropy\n24-DOR\n25-DP\n26-DPI\n27-ERR\n28-F0.5\n29-F1\n30-F1 Macro\n31-F1 Micro\n32-F2\n33-FDR\n34-FN\n35-FNR\n36-FNR Macro\n37-FNR Micro\n38-FOR\n39-FP\n40-FPR\n41-FPR Macro\n42-FPR Micro\n43-G\n44-GI\n45-GM\n46-Gwet AC1\n47-Hamming Loss\n48-IBA\n49-ICSI\n50-IS\n51-J\n52-Joint Entropy\n53-KL Divergence\n54-Kappa\n55-Kappa 95% CI\n56-Kappa No Prevalence\n57-Kappa Standard Error\n58-Kappa Unbiased\n59-LS\n60-Lambda A\n61-Lambda B\n62-MCC\n63-MCCI\n64-MCEN\n65-MK\n66-Mutual Information\n67-N\n68-NIR\n69-NLR\n70-NLRI\n71-NPV\n72-OC\n73-OOC\n74-OP\n75-Overall ACC\n76-Overall CEN\n77-Overall J\n78-Overall MCC\n79-Overall MCEN\n80-Overall RACC\n81-Overall RACCU\n82-P\n83-P-Value\n84-PLR\n85-PLRI\n86-POP\n87-PPV\n88-PPV Macro\n89-PPV Micro\n90-PRE\n91-Pearson C\n92-Phi-Squared\n93-Q\n94-QI\n95-RACC\n96-RACCU\n97-RCI\n98-RR\n99-Reference Entropy\n100-Response Entropy\n101-SOA1(Landis & Koch)\n102-SOA2(Fleiss)\n103-SOA3(Altman)\n104-SOA4(Cicchetti)\n105-SOA5(Cramer)\n106-SOA6(Matthews)\n107-Scott PI\n108-Standard Error\n109-TN\n110-TNR\n111-TNR Macro\n112-TNR Micro\n113-TON\n114-TOP\n115-TP\n116-TPR\n117-TPR Macro\n118-TPR Micro\n119-Y\n120-Zero-one Loss\n121-dInd\n122-sInd\n"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `alt_link` , new in <span style=\"color:red;\">version 2.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Parameter recommender",
"_____no_output_____"
],
[
"This option has been added in `version 1.9` to recommend the most related parameters considering the characteristics of the input dataset. The suggested parameters are selected according to some characteristics of the input such as being balance/imbalance and binary/multi-class. All suggestions can be categorized into three main groups: imbalanced dataset, binary classification for a balanced dataset, and multi-class classification for a balanced dataset. The recommendation lists have been gathered according to the respective paper of each parameter and the capabilities which had been claimed by the paper.",
"_____no_output_____"
],
[
"<div style=\"text-align:center;\">\n <img src=\"../Otherfiles/recommendation_block_diagram.jpg\">\n</div>\n<center><p style=\"text-align:center;\">Fig2. Parameter Recommender Block Diagram</p></center>",
"_____no_output_____"
]
],
[
[
"cm.imbalance",
"_____no_output_____"
],
[
"cm.binary",
"_____no_output_____"
],
[
"cm.recommended_list",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : also available in HTML report </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : The recommender system assumes that the input is the result of classification over the whole data rather than just a part of it. If the confusion matrix is the result of test data classification, the recommendation is not valid. </li>\n</ul>\n",
"_____no_output_____"
],
[
"### Comapre",
"_____no_output_____"
],
[
"In `version 2.0`, a method for comparing several confusion matrices is introduced. This option is a combination of several overall and class-based benchmarks. Each of the benchmarks evaluates the performance of the classification algorithm from good to poor and give them a numeric score. The score of good and poor performances are 1 and 0, respectively.\n\nAfter that, two scores are calculated for each confusion matrices, overall and class-based. The overall score is the average of the score of six overall benchmarks which are Landis & Koch, Fleiss, Altman, Cicchetti, Cramer, and Matthews. In the same manner, the class-based score is the average of the score of six class-based benchmarks which are Positive Likelihood Ratio Interpretation, Negative Likelihood Ratio Interpretation, Discriminant Power Interpretation, AUC value Interpretation, Matthews Correlation Coefficient Interpretation and Yule's Q Interpretation. It should be noticed that if one of the benchmarks returns none for one of the classes, that benchmarks will be eliminated in total averaging. If the user sets weights for the classes, the averaging over the value of class-based benchmark scores will transform to a weighted average.\n\nIf the user sets the value of `by_class` boolean input `True`, the best confusion matrix is the one with the maximum class-based score. Otherwise, if a confusion matrix obtains the maximum of both overall and class-based scores, that will be reported as the best confusion matrix, but in any other case, the compared object doesn’t select the best confusion matrix.",
"_____no_output_____"
],
[
"<div style=\"text-align:center;\">\n <img src=\"../Otherfiles/compare_block_diagram.jpg\">\n</div>\n<center><p style=\"text-align:center;\">Fig3. Compare Block Diagram</p></center>",
"_____no_output_____"
]
],
[
[
"cm2 = ConfusionMatrix(matrix={0:{0:2,1:50,2:6},1:{0:5,1:50,2:3},2:{0:1,1:7,2:50}})\ncm3 = ConfusionMatrix(matrix={0:{0:50,1:2,2:6},1:{0:50,1:5,2:3},2:{0:1,1:55,2:2}})",
"_____no_output_____"
],
[
"cp = Compare({\"cm2\":cm2,\"cm3\":cm3})",
"_____no_output_____"
],
[
"print(cp)",
"Best : cm2\n\nRank Name Class-Score Overall-Score\n1 cm2 9.05 2.55\n2 cm3 6.05 1.98333\n\n"
],
[
"cp.scores",
"_____no_output_____"
],
[
"cp.sorted",
"_____no_output_____"
],
[
"cp.best",
"_____no_output_____"
],
[
"cp.best_name",
"_____no_output_____"
],
[
"cp2 = Compare({\"cm2\":cm2,\"cm3\":cm3},by_class=True,weight={0:5,1:1,2:1})",
"_____no_output_____"
],
[
"print(cp2)",
"Best : cm3\n\nRank Name Class-Score Overall-Score\n1 cm3 19.05 1.98333\n2 cm2 14.65 2.55\n\n"
]
],
[
[
"### Acceptable data types\t",
"_____no_output_____"
],
[
"#### ConfusionMatrix",
"_____no_output_____"
],
[
"1. `actual_vector` : python `list` or numpy `array` of any stringable objects\n2. `predict_vector` : python `list` or numpy `array` of any stringable objects\n3. `matrix` : `dict`\n4. `digit`: `int`\n5. `threshold` : `FunctionType (function or lambda)`\n6. `file` : `File object`\n7. `sample_weight` : python `list` or numpy `array` of numbers\n8. `transpose` : `bool`",
"_____no_output_____"
],
[
"* run `help(ConfusionMatrix)` for more information",
"_____no_output_____"
],
[
"#### Compare",
"_____no_output_____"
],
[
"1. `cm_dict` : python `dict` of `ConfusionMatrix` object (`str` : `ConfusionMatrix`)\n2. `by_class` : `bool`\n3. `weight` : python `dict` of class weights (`class_name` : `float`)\n4. `digit`: `int`",
"_____no_output_____"
],
[
"* run `help(Compare)` for more information",
"_____no_output_____"
],
[
"## Basic parameters",
"_____no_output_____"
],
[
"### TP (True positive)",
"_____no_output_____"
],
[
"A true positive test result is one that detects the condition when the\ncondition is present (correctly identified) [[3]](#ref3).",
"_____no_output_____"
]
],
[
[
"cm.TP",
"_____no_output_____"
]
],
[
[
"### TN (True negative)",
"_____no_output_____"
],
[
"A true negative test result is one that does not detect the condition when\nthe condition is absent (correctly rejected) [[3]](#ref3).",
"_____no_output_____"
]
],
[
[
"cm.TN",
"_____no_output_____"
]
],
[
[
"### FP (False positive)",
"_____no_output_____"
],
[
"A false positive test result is one that detects the condition when the\ncondition is absent (incorrectly identified) [[3]](#ref3).",
"_____no_output_____"
]
],
[
[
"cm.FP",
"_____no_output_____"
]
],
[
[
"### FN (False negative)",
"_____no_output_____"
],
[
"A false negative test result is one that does not detect the condition when\nthe condition is present (incorrectly rejected) [[3]](#ref3).",
"_____no_output_____"
]
],
[
[
"cm.FN",
"_____no_output_____"
]
],
[
[
"### P (Condition positive)",
"_____no_output_____"
],
[
"Number of positive samples.\nAlso known as support (the number of occurrences of each class in y_true) [[3]](#ref3).",
"_____no_output_____"
],
[
"$$P=TP+FN$$",
"_____no_output_____"
]
],
[
[
"cm.P",
"_____no_output_____"
]
],
[
[
"### N (Condition negative)",
"_____no_output_____"
],
[
"Number of negative samples [[3]](#ref3).",
"_____no_output_____"
],
[
"$$N=TN+FP$$",
"_____no_output_____"
]
],
[
[
"cm.N",
"_____no_output_____"
]
],
[
[
"### TOP (Test outcome positive)",
"_____no_output_____"
],
[
"Number of positive outcomes [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TOP=TP+FP$$",
"_____no_output_____"
]
],
[
[
"cm.TOP",
"_____no_output_____"
]
],
[
[
"### TON (Test outcome negative)",
"_____no_output_____"
],
[
"Number of negative outcomes [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TON=TN+FN$$",
"_____no_output_____"
]
],
[
[
"cm.TON",
"_____no_output_____"
]
],
[
[
"### POP (Population)",
"_____no_output_____"
],
[
"Total sample size [[3]](#ref3).",
"_____no_output_____"
],
[
"$$POP=TP+TN+FN+FP$$",
"_____no_output_____"
]
],
[
[
"cm.POP",
"_____no_output_____"
]
],
[
[
"* <a href=\"https://en.wikipedia.org/wiki/Confusion_matrix\">Wikipedia page</a>",
"_____no_output_____"
],
[
"## Class statistics",
"_____no_output_____"
],
[
"### TPR (True positive rate)",
"_____no_output_____"
],
[
"Sensitivity (also called the true positive rate, the recall, or probability of detection in some fields) measures the proportion of positives that are correctly identified as such (e.g. the percentage of sick people who are correctly identified as having the condition) [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Sensitivity_and_specificity\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$TPR=\\frac{TP}{P}=\\frac{TP}{TP+FN}$$",
"_____no_output_____"
]
],
[
[
"cm.TPR",
"_____no_output_____"
]
],
[
[
"### TNR (True negative rate)",
"_____no_output_____"
],
[
"Specificity (also called the true negative rate) measures the proportion of negatives that are correctly identified as such (e.g. the percentage of healthy people who are correctly identified as not having the condition) [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Sensitivity_and_specificity\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$TNR=\\frac{TN}{N}=\\frac{TN}{TN+FP}$$",
"_____no_output_____"
]
],
[
[
"cm.TNR",
"_____no_output_____"
]
],
[
[
"### PPV (Positive predictive value)",
"_____no_output_____"
],
[
"Positive predictive value (PPV) is the proportion of positives that correspond to\nthe presence of the condition [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Positive_and_negative_predictive_values\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$PPV=\\frac{TP}{TP+FP}$$",
"_____no_output_____"
]
],
[
[
"cm.PPV",
"_____no_output_____"
]
],
[
[
"### NPV (Negative predictive value)",
"_____no_output_____"
],
[
"Negative predictive value (NPV) is the proportion of negatives that correspond to\nthe absence of the condition [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Positive_and_negative_predictive_values\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$NPV=\\frac{TN}{TN+FN}$$",
"_____no_output_____"
]
],
[
[
"cm.NPV",
"_____no_output_____"
]
],
[
[
"### FNR (False negative rate)",
"_____no_output_____"
],
[
"The false negative rate is the proportion of positives which yield negative test outcomes with the test, i.e., the conditional probability of a negative test result given that the condition being looked for is present [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$FNR=\\frac{FN}{P}=\\frac{FN}{FN+TP}=1-TPR$$",
"_____no_output_____"
]
],
[
[
"cm.FNR",
"_____no_output_____"
]
],
[
[
"### FPR (False positive rate)",
"_____no_output_____"
],
[
"The false positive rate is the proportion of all negatives that still yield positive test outcomes, i.e., the conditional probability of a positive test result given an event that was not present [[3]](#ref3).\n\nThe false positive rate is equal to the significance level. The specificity of the test is equal to $ 1 $ minus the false positive rate.\n\n<a href=\"https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$FPR=\\frac{FP}{N}=\\frac{FP}{FP+TN}=1-TNR$$",
"_____no_output_____"
]
],
[
[
"cm.FPR",
"_____no_output_____"
]
],
[
[
"### FDR (False discovery rate)",
"_____no_output_____"
],
[
"The false discovery rate (FDR) is a method of conceptualizing the rate of type I errors in null hypothesis testing when conducting multiple comparisons. FDR-controlling procedures are designed to control the expected proportion of \"discoveries\" (rejected null hypotheses) that are false (incorrect rejections) [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/False_discovery_rate\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$FDR=\\frac{FP}{FP+TP}=1-PPV$$",
"_____no_output_____"
]
],
[
[
"cm.FDR",
"_____no_output_____"
]
],
[
[
"### FOR (False omission rate)",
"_____no_output_____"
],
[
"False omission rate (FOR) is a statistical method used in multiple hypothesis testing to correct for multiple comparisons and it is the complement of the negative predictive value. It measures the proportion of false negatives which are incorrectly rejected [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Positive_and_negative_predictive_values\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$FOR=\\frac{FN}{FN+TN}=1-NPV$$",
"_____no_output_____"
]
],
[
[
"cm.FOR",
"_____no_output_____"
]
],
[
[
"### ACC (Accuracy)",
"_____no_output_____"
],
[
"The accuracy is the number of correct predictions from all predictions made [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Accuracy_and_precision\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$ACC=\\frac{TP+TN}{P+N}=\\frac{TP+TN}{TP+TN+FP+FN}$$",
"_____no_output_____"
]
],
[
[
"cm.ACC",
"_____no_output_____"
]
],
[
[
"### ERR (Error rate)",
"_____no_output_____"
],
[
"The error rate is the number of incorrect predictions from all predictions made [[3]](#ref3).",
"_____no_output_____"
],
[
"$$ERR=\\frac{FP+FN}{P+N}=\\frac{FP+FN}{TP+TN+FP+FN}=1-ACC$$",
"_____no_output_____"
]
],
[
[
"cm.ERR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### FBeta-Score",
"_____no_output_____"
],
[
"In statistical analysis of classification, the F1 score (also F-score or F-measure) is a measure of a test's accuracy. It considers both the precision $ p $ and the recall $ r $ of the test to compute the score.\nThe F1 score is the harmonic average of the precision and recall, where F1 score reaches its best value at $ 1 $ (perfect precision and recall) and worst at $ 0 $ [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/F1_score\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$F_{\\beta}=(1+\\beta^2)\\times \\frac{PPV\\times TPR}{(\\beta^2 \\times PPV)+TPR}=\\frac{(1+\\beta^2) \\times TP}{(1+\\beta^2)\\times TP+FP+\\beta^2 \\times FN}$$",
"_____no_output_____"
]
],
[
[
"cm.F1",
"_____no_output_____"
],
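[
 "# Illustrative sketch: recompute F1 (beta = 1) as the harmonic mean of precision and recall,\n# matching the F-beta formula above.\n{c: 2 * cm.PPV[c] * cm.TPR[c] / (cm.PPV[c] + cm.TPR[c]) for c in cm.classes}",
 "_____no_output_____"
],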
[
"cm.F05",
"_____no_output_____"
],
[
"cm.F2",
"_____no_output_____"
],
[
"cm.F_beta(beta=4)",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `beta` : beta parameter (type : `float`)",
"_____no_output_____"
],
[
"#### Output",
"_____no_output_____"
],
[
"`{class1: FBeta-Score1, class2: FBeta-Score2, ...}`",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### MCC (Matthews correlation coefficient)",
"_____no_output_____"
],
[
"The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications, introduced by biochemist Brian W. Matthews in 1975. It takes into account true and false positives and negatives and is generally regarded as a balanced measure that can be used even if the classes are of very different sizes. The MCC is, in essence, a correlation coefficient between the observed and predicted binary classifications; it returns a value between $ −1 $ and $ +1 $. A coefficient of $ +1 $ represents a perfect prediction, $ 0 $ no better than random prediction and $ −1 $ indicates total disagreement between prediction and observation [[27]](#ref27).\n\n<a href=\"#MCCI-(Matthews-correlation-coefficient-interpretation)\">Interpretation</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Matthews_correlation_coefficient\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$MCC=\\frac{TP \\times TN-FP \\times FN}{\\sqrt{(TP+FP)\\times (TP+FN)\\times (TN+FP)\\times (TN+FN)}}$$",
"_____no_output_____"
]
],
[
[
"cm.MCC",
"_____no_output_____"
]
],
[
[
"### BM (Bookmaker informedness)",
"_____no_output_____"
],
[
"The informedness of a prediction method as captured by a contingency matrix is defined as the probability that the prediction method will make a correct decision as opposed to guessing and is calculated using the bookmaker algorithm [[2]](#ref2).\n\nEquals to <a href=\"#Y-(Youden-index)\">Youden Index</a>",
"_____no_output_____"
],
[
"$$BM=TPR+TNR-1$$",
"_____no_output_____"
]
],
[
[
"cm.BM",
"_____no_output_____"
]
],
[
[
"### MK (Markedness)",
"_____no_output_____"
],
[
"In statistics and psychology, the social science concept of markedness is quantified as a measure of how much one variable is marked as a predictor or possible cause of another and is also known as $ \\triangle P $ in simple two-choice cases [[2]](#ref2).",
"_____no_output_____"
],
[
"$$MK=PPV+NPV-1$$",
"_____no_output_____"
]
],
[
[
"cm.MK",
"_____no_output_____"
]
],
[
[
"### PLR (Positive likelihood ratio)",
"_____no_output_____"
],
[
"Likelihood ratios are used for assessing the value of performing a diagnostic test. They use the sensitivity and specificity of the test to determine whether a test result usefully changes the probability that a condition (such as a disease state) exists. The first description of the use of likelihood ratios for decision rules was made at a symposium on information theory in 1954 [[28]](#ref28).\n\n<a href=\"#PLRI-(Positive-likelihood-ratio-interpretation)\">Interpretation</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$LR_+=PLR=\\frac{TPR}{FPR}$$",
"_____no_output_____"
]
],
[
[
"cm.PLR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `LR+` renamed to `PLR` in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### NLR (Negative likelihood ratio)",
"_____no_output_____"
],
[
"Likelihood ratios are used for assessing the value of performing a diagnostic test. They use the sensitivity and specificity of the test to determine whether a test result usefully changes the probability that a condition (such as a disease state) exists. The first description of the use of likelihood ratios for decision rules was made at a symposium on information theory in 1954 [[28]](#ref28).\n\n<a href=\"#NLRI-(Negative-likelihood-ratio-interpretation)\">Interpretation</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$LR_-=NLR=\\frac{FNR}{TNR}$$",
"_____no_output_____"
]
],
[
[
"cm.NLR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `LR-` renamed to `NLR` in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### DOR (Diagnostic odds ratio)",
"_____no_output_____"
],
[
"The diagnostic odds ratio is a measure of the effectiveness of a diagnostic test. It is defined as the ratio of the odds of the test being positive if the subject has a disease relative to the odds of the test being positive if the subject does not have the disease [[28]](#ref28).\n\n<a href=\"https://en.wikipedia.org/wiki/Diagnostic_odds_ratio\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$DOR=\\frac{LR_+}{LR_-}$$",
"_____no_output_____"
]
],
[
[
"cm.DOR",
"_____no_output_____"
]
],
[
[
"### PRE (Prevalence)",
"_____no_output_____"
],
[
"Prevalence is a statistical concept referring to the number of cases of a disease that are present in a particular population at a given time (Reference Likelihood) [[14]](#ref14).\n\n<a href=\"https://en.wikipedia.org/wiki/Prevalence\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$Prevalence=\\frac{P}{POP}$$",
"_____no_output_____"
]
],
[
[
"cm.PRE",
"_____no_output_____"
]
],
[
[
"### G (G-measure)",
"_____no_output_____"
],
[
"The geometric mean of precision and sensitivity, also known as Fowlkes–Mallows index [[3]](#ref3).\n\n<a href=\"https://en.wikipedia.org/wiki/Fowlkes%E2%80%93Mallows_index\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$G=\\sqrt{PPV\\times TPR}$$",
"_____no_output_____"
]
],
[
[
"cm.G",
"_____no_output_____"
]
],
[
[
"### RACC (Random accuracy)",
"_____no_output_____"
],
[
"The expected accuracy from a strategy of randomly guessing categories according to reference and response distributions [[24]](#ref24).",
"_____no_output_____"
],
[
"$$RACC=\\frac{TOP \\times P}{POP^2}$$",
"_____no_output_____"
]
],
[
[
"cm.RACC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### RACCU (Random accuracy unbiased)",
"_____no_output_____"
],
[
"The expected accuracy from a strategy of randomly guessing categories according to the average of the reference and response distributions [[25]](#ref25).",
"_____no_output_____"
],
[
"$$RACCU=(\\frac{TOP+P}{2 \\times POP})^2$$",
"_____no_output_____"
]
],
[
[
"cm.RACCU",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### J (Jaccard index)",
"_____no_output_____"
],
[
"The Jaccard index, also known as Intersection over Union and the Jaccard similarity coefficient (originally coined coefficient de communauté by Paul Jaccard), is a statistic used for comparing the similarity and diversity of sample sets [[29]](#ref29).\n\n<a href=\"https://en.wikipedia.org/wiki/Jaccard_index\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$J=\\frac{TP}{TOP+P-TP}$$",
"_____no_output_____"
]
],
[
[
"cm.J",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.9</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### IS (Information score)",
"_____no_output_____"
],
[
"The amount of information needed to correctly classify an example into\nclass C, whose prior probability is $ p(C) $, is defined as $ -\\log_2(p(C)) $ [[18]](#ref18) [[39]](#ref39).",
"_____no_output_____"
],
[
"$$IS=-log_2(\\frac{TP+FN}{POP})+log_2(\\frac{TP}{TP+FP})$$",
"_____no_output_____"
]
],
[
[
"cm.IS",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### CEN (Confusion entropy)",
"_____no_output_____"
],
[
"CEN based upon the concept of entropy for evaluating classifier performances. By exploiting the misclassification information of confusion matrices, the measure evaluates the confusion level of the class distribution of\nmisclassified samples. Both theoretical analysis and statistical results show that the proposed measure is more discriminating than accuracy and RCI while it remains relatively consistent with the two measures. Moreover, it is more capable of measuring how the samples of different classes have been separated from each\nother. Hence the proposed measure is more precise than the two measures and can substitute for them to evaluate classifiers in classification applications [[17]](#ref17).",
"_____no_output_____"
],
[
"$$P_{i,j}^{j}=\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}\\Big(Matrix(j,k)+Matrix(k,j)\\Big)}$$",
"_____no_output_____"
],
[
"$$P_{i,j}^{i}=\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}\\Big(Matrix(i,k)+Matrix(k,i)\\Big)}$$",
"_____no_output_____"
],
[
"$$CEN_j=-\\sum_{k=1,k\\neq j}^{|C|}\\Bigg(P_{j,k}^jlog_{2(|C|-1)}\\Big(P_{j,k}^j\\Big)+P_{k,j}^jlog_{2(|C|-1)}\\Big(P_{k,j}^j\\Big)\\Bigg)$$",
"_____no_output_____"
]
],
[
[
"cm.CEN",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : $ |C| $ is the number of classes </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### MCEN (Modified confusion entropy)",
"_____no_output_____"
],
[
"Modified version of CEN [[19]](#ref19).",
"_____no_output_____"
],
[
"$$P_{i,j}^{j}=\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}\\Big(Matrix(j,k)+Matrix(k,j)\\Big)-Matrix(j,j)}$$",
"_____no_output_____"
],
[
"$$P_{i,j}^{i}=\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}\\Big(Matrix(i,k)+Matrix(k,i)\\Big)-Matrix(i,i)}$$",
"_____no_output_____"
],
[
"$$MCEN_j=-\\sum_{k=1,k\\neq j}^{|C|}\\Bigg(P_{j,k}^jlog_{2(|C|-1)}\\Big(P_{j,k}^j\\Big)+P_{k,j}^jlog_{2(|C|-1)}\\Big(P_{k,j}^j\\Big)\\Bigg)$$",
"_____no_output_____"
]
],
[
[
"cm.MCEN",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AUC (Area under the ROC curve)",
"_____no_output_____"
],
[
"The area under the curve (often referred to as simply the AUC) is equal to the probability that a classifier will rank a randomly chosen positive instance higher than a randomly chosen negative one (assuming 'positive' ranks higher than 'negative').\nThus, AUC corresponds to the arithmetic mean of sensitivity and specificity values of each class [[23]](#ref23).\n\n<a href=\"#AUCI-(AUC-value-interpretation)\">Interpretation</a>",
"_____no_output_____"
],
[
"$$AUC=\\frac{TNR+TPR}{2}$$",
"_____no_output_____"
]
],
[
[
"cm.AUC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n <li><span style=\"color:red;\">Notice </span> : this is an approximate calculation of AUC </li>\n</ul>",
"_____no_output_____"
],
[
"### dInd (Distance index)",
"_____no_output_____"
],
[
"Euclidean distance of a ROC point from the top left corner of the ROC space, which can take values between 0 (perfect classification) and $ \\sqrt{2} $ [[23]](#ref23).",
"_____no_output_____"
],
[
"$$dInd=\\sqrt{(1-TNR)^2+(1-TPR)^2}$$",
"_____no_output_____"
]
],
[
[
"cm.dInd",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### sInd (Similarity index)",
"_____no_output_____"
],
[
"sInd is comprised between $ 0 $ (no correct classifications) and $ 1 $ (perfect classification) [[23]](#ref23).",
"_____no_output_____"
],
[
"$$sInd = 1 - \\sqrt{\\frac{(1-TNR)^2+(1-TPR)^2}{2}}$$",
"_____no_output_____"
]
],
[
[
"cm.sInd",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### DP (Discriminant power) ",
"_____no_output_____"
],
[
"Discriminant power (DP) is a measure that summarizes sensitivity and specificity.\nThe DP has been used mainly in feature selection over imbalanced data [[33]](#ref33).\n\n<a href=\"#DPI-(Discriminant-power-interpretation)\">Interpretation</a>",
"_____no_output_____"
],
[
"$$X=\\frac{TPR}{1-TPR}$$",
"_____no_output_____"
],
[
"$$Y=\\frac{TNR}{1-TNR}$$",
"_____no_output_____"
],
[
"$$DP=\\frac{\\sqrt{3}}{\\pi}(log_{10}X+log_{10}Y)$$",
"_____no_output_____"
]
],
[
[
"cm.DP",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Y (Youden index)",
"_____no_output_____"
],
[
"Youden’s index evaluates the algorithm’s ability to avoid failure; it’s derived from sensitivity and\nspecificity and denotes a linear correspondence balanced accuracy.\nAs Youden’s index is a linear transformation of the mean sensitivity and specificity, its values are difficult to\ninterpret, we retain that a higher value of Y indicates better ability to avoid failure. \nYouden’s index has been conventionally used to evaluate tests diagnostic, improve the efficiency of\nTelemedical prevention [[33]](#ref33) [[34]](#ref34).\n\n<a href=\"https://en.wikipedia.org/wiki/Youden%27s_J_statistic\">Wikipedia page</a>\n\nEquals to <a href=\"#BM-(Bookmaker-informedness)\">Bookmaker Informedness</a>",
"_____no_output_____"
],
[
"$$Y=BM=TPR+TNR-1$$",
"_____no_output_____"
]
],
[
[
"cm.Y",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### PLRI (Positive likelihood ratio interpretation)",
"_____no_output_____"
],
[
"For more information visit [[33]](#ref33).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">PLR</td>\n <td style=\"text-align:center\">Model contribution</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 1 ></td>\n <td style=\"text-align:center;background-color:red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">1 - 5</td>\n <td style=\"text-align:center;background-color:orange;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">5 - 10</td>\n <td style=\"text-align:center;background-color:yellow;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> > 10 </td>\n <td style=\"text-align:center;background-color:green;\">Good</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.PLRI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### NLRI (Negative likelihood ratio interpretation)",
"_____no_output_____"
],
[
"For more information visit [[48]](#ref48).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">NLR</td>\n <td style=\"text-align:center\">Model contribution</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.5 - 1</td>\n <td style=\"text-align:center;background-color:red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.2 - 0.5</td>\n <td style=\"text-align:center;background-color:orange;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.1 - 0.2</td>\n <td style=\"text-align:center;background-color:yellow;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.1 > </td>\n <td style=\"text-align:center;background-color:green;\">Good</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.NLRI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### DPI (Discriminant power interpretation)",
"_____no_output_____"
],
[
"For more information visit [[33]](#ref33).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">DP</td>\n <td style=\"text-align:center\">Model contribution</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 1 > </td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">1 - 2</td>\n <td style=\"text-align:center;background-color:orange;\">Limited</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">2 - 3</td>\n <td style=\"text-align:center;background-color:yellow;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> > 3 </td>\n <td style=\"text-align:center;background-color:green;\">Good</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.DPI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AUCI (AUC value interpretation)",
"_____no_output_____"
],
[
"For more information visit [[33]](#ref33).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">AUC</td>\n <td style=\"text-align:center\">Model performance</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.5 - 0.6</td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.6 - 0.7</td>\n <td style=\"text-align:center;background-color:orange;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.7 - 0.8</td>\n <td style=\"text-align:center;background-color:yellowgreen;\">Good</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.8 - 0.9</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Very Good</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.9 - 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Excellent</td>\n </tr>\n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.AUCI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### MCCI (Matthews correlation coefficient interpretation)",
"_____no_output_____"
],
[
"MCC is a confusion matrix method of calculating the Pearson product-moment correlation coefficient (not to be confused with Pearson's C). Therefore, it has the same interpretation [[2]](#ref2).\n\nFor more information visit [[49]](#ref49).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">MCC</td>\n <td style=\"text-align:center\">Interpretation</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.3 ></td>\n <td style=\"text-align:center;background-color:Red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.3 - 0.5</td>\n <td style=\"text-align:center;background-color:orange;\">Weak</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.5 - 0.7</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.7 - 0.9</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Strong</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.9 - 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Very Strong</td>\n </tr>\n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.MCCI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : only positive values are considered</li>\n</ul>",
"_____no_output_____"
],
[
"### QI (Yule's Q interpretation)",
"_____no_output_____"
],
[
"For more information visit [[67]](#ref67).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Q</td>\n <td style=\"text-align:center\">Interpretation</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.25 > </td>\n <td style=\"text-align:center;background-color:red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.25 - 0.5</td>\n <td style=\"text-align:center;background-color:orange;\">Weak</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.5 - 0.75</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> > 0.75 </td>\n <td style=\"text-align:center;background-color:green;\">Strong</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.QI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### GI (Gini index)",
"_____no_output_____"
],
[
"A chance-standardized variant of the AUC is given by Gini coefficient, taking values between $ 0 $ (no difference\nbetween the score distributions of the two classes) and $ 1 $ (complete separation between the two distributions).\nGini coefficient is widespread use metric in imbalanced data learning [[33]](#ref33). \n\n<a href=\"https://en.wikipedia.org/wiki/Gini_coefficient\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$GI=2\\times AUC-1$$",
"_____no_output_____"
]
],
[
[
"cm.GI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### LS (Lift score)",
"_____no_output_____"
],
[
"In the context of classification, lift compares model predictions to randomly generated predictions. Lift is often used in marketing research combined with gain and lift charts as a visual aid [[35]](#ref35) [[36]](#ref36).",
"_____no_output_____"
],
[
"$$LS=\\frac{PPV}{PRE}$$",
"_____no_output_____"
]
],
[
[
"cm.LS",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.8</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AM (Automatic/Manual)",
"_____no_output_____"
],
[
"Difference between automatic and manual classification i.e., the difference between positive outcomes and of positive samples.",
"_____no_output_____"
],
[
"$$AM=TOP-P=(TP+FP)-(TP+FN)$$",
"_____no_output_____"
]
],
[
[
"cm.AM",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.9</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### BCD (Bray-Curtis dissimilarity)",
"_____no_output_____"
],
[
"In ecology and biology, the Bray–Curtis dissimilarity, named after J. Roger Bray and John T. Curtis, is a statistic used to quantify the compositional dissimilarity between two different sites, based on counts at each site [[37]](#ref37).\n\n<a href=\"https://en.wikipedia.org/wiki/Bray%E2%80%93Curtis_dissimilarity\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$BCD=\\frac{|AM|}{\\sum_{i=1}^{|C|}\\Big(TOP_i+P_i\\Big)}$$",
"_____no_output_____"
]
],
[
[
"cm.BCD",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.9</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### OP (Optimized precision)",
"_____no_output_____"
],
[
"Optimized precision is a type of hybrid threshold metric and has been proposed as a\ndiscriminator for building an optimized heuristic classifier. This metric is a combination of\naccuracy, sensitivity and specificity metrics. The sensitivity and specificity metrics were used for \nstabilizing and optimizing the accuracy performance when dealing with an imbalanced class of two-class problems [[40]](#ref40) [[42]](#ref42).",
"_____no_output_____"
],
[
"$$OP = ACC - \\frac{|TNR-TPR|}{|TNR+TPR|}$$",
"_____no_output_____"
]
],
[
[
"cm.OP",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### IBA (Index of balanced accuracy)",
"_____no_output_____"
],
[
"The method combines an unbiased index of its overall accuracy and a measure about\nhow dominant is the class with the highest individual accuracy rate [[41]](#ref41) [[42]](#ref42).",
"_____no_output_____"
],
[
"$$IBA_{\\alpha}=(1+\\alpha \\times(TPR-TNR))\\times TNR \\times TPR$$",
"_____no_output_____"
]
],
[
[
"cm.IBA",
"_____no_output_____"
],
[
"cm.IBA_alpha(0.5)",
"_____no_output_____"
],
[
"cm.IBA_alpha(0.1)",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `alpha` : alpha parameter (type : `float`)",
"_____no_output_____"
],
[
"#### Output",
"_____no_output_____"
],
[
"`{class1: IBA1, class2: IBA2, ...}`",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### GM (G-mean)",
"_____no_output_____"
],
[
"Geometric mean of specificity and sensitivity [[3]](#ref3) [[41]](#ref41) [[42]](#ref42).",
"_____no_output_____"
],
[
"$$GM=\\sqrt{TPR \\times TNR}$$",
"_____no_output_____"
]
],
[
[
"cm.GM",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Q (Yule's Q)",
"_____no_output_____"
],
[
"In statistics, Yule's Q, also known as the coefficient of colligation, is a measure of association between two binary variables [[45]](#ref45).\n\n<a href=\"#QI-(Yule's-Q-interpretation)\">Interpretation</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Coefficient_of_colligation\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$OR = \\frac{TP\\times TN}{FP\\times FN}$$",
"_____no_output_____"
],
[
"$$Q = \\frac{OR-1}{OR+1}$$",
"_____no_output_____"
]
],
[
[
"cm.Q",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AGM (Adjusted G-mean)",
"_____no_output_____"
],
[
"An adjusted version of the geometric mean of specificity and sensitivity [[46]](#ref46).",
"_____no_output_____"
],
[
"$$N_n=\\frac{N}{POP}$$",
"_____no_output_____"
],
[
"$$AGM=\\frac{GM+TNR\\times N_n}{1+N_n};TPR>0$$",
"_____no_output_____"
],
[
"$$AGM=0;TPR=0$$",
"_____no_output_____"
]
],
[
[
"cm.AGM",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AGF (Adjusted F-score)",
"_____no_output_____"
],
[
"The F-measures used only three of the four elements of the confusion matrix and hence two classifiers with different TNR values may have the same F-score. Therefore, the AGF metric is introduced to use all elements of the confusion matrix and provide more weights to samples which are correctly classified in the minority class [[50]](#ref50).",
"_____no_output_____"
],
[
"$$AGF=\\sqrt{F_2 \\times InvF_{0.5}}$$",
"_____no_output_____"
],
[
"$$F_{2}=5\\times \\frac{PPV\\times TPR}{(4 \\times PPV)+TPR}$$",
"_____no_output_____"
],
[
"$$InvF_{0.5}=(1+0.5^2)\\times \\frac{NPV\\times TNR}{(0.5^2 \\times NPV)+TNR}$$",
"_____no_output_____"
]
],
[
[
"cm.AGF",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### OC (Overlap coefficient)",
"_____no_output_____"
],
[
"The overlap coefficient, or Szymkiewicz–Simpson coefficient, is a similarity measure that measures the overlap between two finite sets. It is defined as the size of the intersection divided by the smaller of the size of the two sets [[52]](#ref52).\n\n<a href=\"https://en.wikipedia.org/wiki/Overlap_coefficient\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$OC=\\frac{TP}{min(TOP,P)}=max(PPV,TPR)$$",
"_____no_output_____"
]
],
[
[
"cm.OC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### OOC (Otsuka-Ochiai coefficient)",
"_____no_output_____"
],
[
"In biology, there is a similarity index, known as the Otsuka-Ochiai coefficient named after Yanosuke Otsuka and Akira Ochiai, also known as the Ochiai-Barkman or Ochiai coefficient. If sets are represented as bit vectors, the Otsuka-Ochiai coefficient can be seen to be the same as the cosine similarity [[53]](#ref53).\n\n<a href=\"https://en.wikipedia.org/wiki/Yanosuke_Otsuka\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$OOC=\\frac{TP}{\\sqrt{TOP\\times P}}$$",
"_____no_output_____"
]
],
[
[
"cm.OOC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### TI (Tversky index)",
"_____no_output_____"
],
[
"The Tversky index, named after Amos Tversky, is an asymmetric similarity measure on sets that compares a variant to a prototype. The Tversky index can be seen as a generalization of Dice's coefficient and Tanimoto coefficient [[54]](#ref54).\n\n<a href=\"https://en.wikipedia.org/wiki/Tversky_index\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$TI(\\alpha,\\beta)=\\frac{TP}{TP+\\alpha FN+\\beta FP}$$",
"_____no_output_____"
]
],
[
[
"cm.TI(2,3)",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `alpha` : alpha coefficient (type : `float`)\n2. `beta` : beta coefficient (type : `float`)",
"_____no_output_____"
],
[
"#### Output",
"_____no_output_____"
],
[
"`{class1: TI1, class2: TI2, ...}`",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AUPR (Area under the PR curve)",
"_____no_output_____"
],
[
"A PR curve is plotting precision against recall. The precision recall area under curve (AUPR) is just the area under the PR curve. The higher it is, the better the model is [[55]](#ref55) [[56]](#ref56).\n\n",
"_____no_output_____"
],
[
"$$AUPR=\\frac{TPR+PPV}{2}$$",
"_____no_output_____"
]
],
[
[
"cm.AUPR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.4</span> </li>\n <li><span style=\"color:red;\">Notice </span> : this is an approximate calculation of AUPR </li>\n</ul>",
"_____no_output_____"
],
[
"### ICSI (Individual classification success index)",
"_____no_output_____"
],
[
"The Individual Classification Success Index (ICSI), is a\nclass-specific symmetric measure defined for classification\nassessment purpose. ICSI is hence $ 1 $ minus the sum of type I and type II errors.\nIt ranges from $ -1 $ (both errors are maximal, i.e. $ 1 $) to $ 1 $ (both\nerrors are minimal, i.e. $ 0 $), but the value $ 0 $ does not have any\nclear meaning. The measure is symmetric, and linearly related\nto the arithmetic mean of TPR and PPV [[58]](#ref58).",
"_____no_output_____"
],
[
"$$ICSI=PPV+TPR-1$$",
"_____no_output_____"
]
],
[
[
"cm.ICSI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### CI (Confidence interval)",
"_____no_output_____"
],
[
"In statistics, a confidence interval (CI) is a type of interval estimate (of a population parameter) that is computed from the observed data. The confidence level is the frequency (i.e., the proportion) of possible confidence intervals that contain the true value of their corresponding parameter. In other words, if confidence intervals are constructed using a given confidence level in an infinite number of independent experiments, the proportion of those intervals that contain the true value of the parameter will match the confidence level [[31]](#ref31).\n\nSupported statistics : `ACC`,`AUC`,`PRE`,`Overall ACC`,`Kappa`,`TPR`,`TNR`,`PPV`,`NPV`,`PLR`,`NLR`\n\nSupported alpha values (two-sided) : 0.001, 0.002, 0.01, 0.02, 0.05, 0.1, 0.2\n\nSupported alpha values (one-sided) : 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1",
"_____no_output_____"
],
[
"Confidence intervals for `TPR`,`TNR`,`PPV`,`NPV`,`ACC`,`PRE` and `Overall ACC` are calculated using the normal approximation to the binomial distribution [[59]](#ref59), Wilson score [[62]](#ref62) and Agresti-Coull method [[63]](#ref63): \n\n#### Normal approximation:\n\n$$SE=\\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}}$$\n\n$$CI=\\hat{p}\\pm z\\times SE$$\n\n$$n=\\begin{cases}P & \\hat{p} == TPR/FNR\\\\N & \\hat{p} == TNR/FPR\\\\TOP & \\hat{p} == PPV\\\\TON & \\hat{p} ==NPV \\\\POP& \\hat{p} == ACC/ACC_{Overall}\\end{cases}$$",
"_____no_output_____"
],
[
"#### Wilson score:\n\n$$CI=\\frac{\\hat{p}+\\frac{z^2}{2n}}{1+\\frac{z^2}{n}}\\pm\\frac{z}{1+\\frac{z^2}{n}}\\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}+\\frac{z^2}{4n^2}}$$\n",
"_____no_output_____"
],
[
"#### Agresti-Coull:\n\n$$\\hat{p}=\\frac{x}{n}$$\n\n$$\\tilde{p}=\\frac{x+\\frac{z^2}{2}}{n+z^2}$$\n\n$$CI =\\tilde{p}\\pm\\sqrt{\\frac{\\tilde{p}(1-\\tilde{p})}{n+z^2}}$$",
"_____no_output_____"
],
[
"Confidence interval for `Kappa` are calculated using Fleiss formula [[24]](#ref24) [[38]](#ref38) :\n\n$$SE_{Kappa}=\\sqrt{\\frac{ACC_{Overall}\\times (1-RACC_{Overall})}{(1-RACC_{Overall})^2}}$$\n\n$$CI_{Kappa}=Kappa\\pm z\\times SE_{Kappa}$$",
"_____no_output_____"
],
[
"Confidence intervals for `NLR` and `PLR` are calculated using the log method [[60]](#ref60) :\n\n$$SE_{LR}=\\sqrt{\\frac{1}{a}-\\frac{1}{b}+\\frac{1}{c}-\\frac{1}{d}}$$\n\n$$CI_{LR}=e^{ln(LR)\\pm z\\times SE_{LR}}$$\n\n$$PLR:\\begin{cases}a=TP\\\\b=P\\\\c=FP\\\\d=N\\end{cases}$$\n\n$$NLR:\\begin{cases}a=FN\\\\b=P\\\\c=TN\\\\d=N\\end{cases}$$\n",
"_____no_output_____"
],
[
"Confidence interval for `AUC` is calculated using Hanley and McNeil formula [[61]](#ref61) :\n\n$$SE_{AUC}=\\sqrt{\\frac{q_0+(N-1)q_1+(P-1)q_2}{N\\times P}}$$\n\n$$q_0=AUC(1-AUC)$$\n\n$$q_1=\\frac{AUC}{2-AUC}-AUC^2$$\n\n$$q_2=\\frac{2AUC^2}{1+AUC}-AUC^2$$\n\n$$CI_{AUC}=AUC\\pm z\\times SE_{AUC}$$",
"_____no_output_____"
]
],
[
[
"cm.CI(\"TPR\")",
"_____no_output_____"
],
[
"cm.CI(\"FNR\",alpha=0.001,one_sided=True)",
"_____no_output_____"
],
[
"cm.CI(\"PRE\",alpha=0.05,binom_method=\"wilson\")",
"_____no_output_____"
],
[
"cm.CI(\"Overall ACC\",alpha=0.02,binom_method=\"agresti-coull\")",
"_____no_output_____"
],
[
"cm.CI(\"Overall ACC\",alpha=0.05)",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `param` : input parameter (type : `str`)\n2. `alpha` : type I error (type : `float`, default : `0.05`)\n3. `one_sided` : one-sided mode (type : `bool`, default : `False`)\n4. `binom_method` : binomial confidence intervals method (type : `str`, default : `normal-approx`)",
"_____no_output_____"
],
[
"#### Output",
"_____no_output_____"
],
[
"1. Two-sided : `{class1: [SE1, (Lower CI, Upper CI)], ...}`\n2. One-sided : `{class1: [SE1, (Lower one-sided CI, Upper one-sided CI)], ...}`",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : For more information visit <a href=\"#Example-8-(Confidence-interval)\">Example 8</a></li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### NB (Net benefit)",
"_____no_output_____"
],
[
"NB is a weighted sum of true positive\nclassifications with compensation for false positive classifications by giving\nthese a weight $ w $ [[64]](#ref64) [[65]](#ref65).",
"_____no_output_____"
],
[
"$$NB=\\frac{TP-w\\times FP}{POP}$$",
"_____no_output_____"
],
[
"Vickers and Elkin (2006) suggested considering a range of thresholds and\ncalculating the NB across these thresholds. The results can be plotted in a\ndecision curve [[66]](#ref66).",
"_____no_output_____"
],
[
"$$p_t=threshold$$\n$$w=\\frac{p_t}{1-p_t}$$",
"_____no_output_____"
]
],
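[
[
"As a small illustrative sketch (the threshold grid below is arbitrary and not part of pycm), each probability threshold $ p_t $ can be converted to a weight and passed to `NB` to build the points of a decision curve:\n\n```python\n# sweep a few illustrative thresholds and collect per-class net benefit\nthresholds = [0.01, 0.05, 0.1, 0.2]\ncurve = {}\nfor p_t in thresholds:\n    w = p_t / (1 - p_t)        # weight implied by the threshold\n    curve[p_t] = cm.NB(w=w)    # dict of per-class NB values\nprint(curve)\n```",
"_____no_output_____"
]
],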
[
[
"cm.NB(w=0.059)",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `w` : weight",
"_____no_output_____"
],
[
"#### Output",
"_____no_output_____"
],
[
"`{class1: NB1, class2: NB2, ...}`",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"## Overall statistics",
"_____no_output_____"
],
[
"### Kappa",
"_____no_output_____"
],
[
"Kappa is a statistic that measures inter-rater agreement for qualitative (categorical) items. It is generally thought to be a more robust measure than simple percent agreement calculation, as kappa takes into account the possibility of the agreement occurring by chance [[24]](#ref24).\n\n<a href=\"#SOA1-(Landis-&-Koch's-benchmark)\">Benchmark1</a>\n<a href=\"#SOA2-(Fleiss'-benchmark)\">Benchmark2</a>\n<a href=\"#SOA3-(Altman's-benchmark)\">Benchmark3</a>\n<a href=\"#SOA4-(Cicchetti's-benchmark)\">Benchmark4</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Cohen%27s_kappa\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$Kappa=\\frac{ACC_{Overall}-RACC_{Overall}}{1-RACC_{Overall}}$$",
"_____no_output_____"
]
],
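[
[
"Since `Overall_ACC` and `Overall_RACC` are exposed on the same object, kappa can be reproduced by hand as a quick check (a minimal sketch on the example matrix above):\n\n```python\npo = cm.Overall_ACC     # observed agreement (~0.58333 in this example)\npe = cm.Overall_RACC    # chance agreement (~0.35417 in this example)\nkappa = (po - pe) / (1 - pe)\nprint(kappa)            # expected to match cm.Kappa (~0.35484)\n```",
"_____no_output_____"
]
],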
[
[
"cm.Kappa",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Kappa unbiased",
"_____no_output_____"
],
[
"The unbiased kappa value is defined in terms of total accuracy and a slightly different computation of expected likelihood that averages the reference and response probabilities [[25]](#ref25).\n\nEquals to [Scott's Pi](#Scott's-Pi)",
"_____no_output_____"
],
[
"$$Kappa_{Unbiased}=\\frac{ACC_{Overall}-RACCU_{Overall}}{1-RACCU_{Overall}}$$",
"_____no_output_____"
]
],
[
[
"cm.KappaUnbiased",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Kappa no prevalence",
"_____no_output_____"
],
[
"The kappa statistic adjusted for prevalence [[14]](#ref14).",
"_____no_output_____"
],
[
"$$Kappa_{NoPrevalence}=2 \\times ACC_{Overall}-1$$",
"_____no_output_____"
]
],
[
[
"cm.KappaNoPrevalence",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Kappa standard error",
"_____no_output_____"
],
[
"The standard error(s) of the Kappa coefficient was obtained by Fleiss (1969) [[24]](#ref24) [[38]](#ref38).",
"_____no_output_____"
],
[
"$$SE_{Kappa}=\\sqrt{\\frac{ACC_{Overall}\\times (1-RACC_{Overall})}{(1-RACC_{Overall})^2}}$$",
"_____no_output_____"
]
],
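[
[
"A minimal sketch recomputing this standard error from the overall accuracy, the chance agreement and the population size (POP = 12 is taken from the example matrix):\n\n```python\nfrom math import sqrt\n\npo, pe, pop = cm.Overall_ACC, cm.Overall_RACC, 12\nse_kappa = sqrt(po * (1 - po) / (pop * (1 - pe) ** 2))\nprint(se_kappa)         # expected to match cm.Kappa_SE (~0.22036)\n```",
"_____no_output_____"
]
],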
[
[
"cm.Kappa_SE",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Kappa 95% CI",
"_____no_output_____"
],
[
"Kappa 95% Confidence Interval [[24]](#ref24) [[38]](#ref38).",
"_____no_output_____"
],
[
"$$CI_{Kappa}=Kappa \\pm 1.96\\times SE_{Kappa}$$",
"_____no_output_____"
]
],
[
[
"cm.Kappa_CI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Chi-squared",
"_____no_output_____"
],
[
"Pearson's chi-squared test is a statistical test applied to sets of categorical data to evaluate how likely it is that any observed difference between the sets arose by chance. It is suitable for unpaired data from large samples [[10]](#ref10).\n\n<a href=\"https://en.wikipedia.org/wiki/Chi-squared_test\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$\\chi^2=\\sum_{i=1}^{|C|}\\sum_{j=1}^{|C|}\\frac{\\Big(Matrix(i,j)-E(i,j)\\Big)^2}{E(i,j)}$$",
"_____no_output_____"
],
[
"$$E(i,j)=\\frac{TOP_j\\times P_i}{POP}$$",
"_____no_output_____"
]
],
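[
[
"A hand-rolled sketch of the same computation on the example matrix (rows are actual classes, columns are predicted classes), useful for seeing how the expected counts $ E(i,j) $ enter the sum:\n\n```python\nmatrix = {'L1': {'L1': 3, 'L2': 0, 'L3': 2},\n          'L2': {'L1': 0, 'L2': 1, 'L3': 1},\n          'L3': {'L1': 0, 'L2': 2, 'L3': 3}}\npop = 12\np = {c: sum(row.values()) for c, row in matrix.items()}          # actual totals (P)\ntop = {c: sum(matrix[r][c] for r in matrix) for c in matrix}     # predicted totals (TOP)\nchi2 = sum((matrix[i][j] - top[j] * p[i] / pop) ** 2 / (top[j] * p[i] / pop)\n           for i in matrix for j in matrix)\nprint(chi2)   # expected to match cm.Chi_Squared (6.6 here)\n```",
"_____no_output_____"
]
],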
[
[
"cm.Chi_Squared",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Chi-squared DF",
"_____no_output_____"
],
[
"Number of degrees of freedom of this confusion matrix for the chi-squared statistic [[10]](#ref10).",
"_____no_output_____"
],
[
"$$DF=(|C|-1)^2$$",
"_____no_output_____"
]
],
[
[
"cm.DF",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Phi-squared",
"_____no_output_____"
],
[
"In statistics, the phi coefficient (or mean square contingency coefficient) is a measure of association for two binary variables. Introduced by Karl Pearson, this measure is similar to the Pearson correlation coefficient in its interpretation. In fact, a Pearson correlation coefficient estimated for two binary variables will return the phi coefficient [[10]](#ref10).\n\n<a href=\"https://en.wikipedia.org/wiki/Phi_coefficient\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$\\phi^2=\\frac{\\chi^2}{POP}$$",
"_____no_output_____"
]
],
[
[
"cm.Phi_Squared",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Cramer's V",
"_____no_output_____"
],
[
"In statistics, Cramér's V (sometimes referred to as Cramér's phi) is a measure of association between two nominal variables, giving a value between $ 0 $ and $ +1 $ (inclusive). It is based on Pearson's chi-squared statistic and was published by Harald Cramér in 1946 [[26]](#ref26).\n\n<a href=\"#SOA5-(Cramer's-benchmark)\">Benchmark</a>\n\n<a href=\"https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$V=\\sqrt{\\frac{\\phi^2}{|C|-1}}$$",
"_____no_output_____"
]
],
[
[
"cm.V",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Standard error",
"_____no_output_____"
],
[
"The standard error (SE) of a statistic (usually an estimate of a parameter) is the standard deviation of its sampling distribution or an estimate of that standard deviation [[31]](#ref31).\n\n<a href=\"https://en.wikipedia.org/wiki/Standard_error\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$SE_{ACC}=\\sqrt{\\frac{ACC\\times (1-ACC)}{POP}}$$",
"_____no_output_____"
]
],
[
[
"cm.SE",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### 95% CI",
"_____no_output_____"
],
[
"In statistics, a confidence interval (CI) is a type of interval estimate (of a population parameter) that is computed from the observed data. The confidence level is the frequency (i.e., the proportion) of possible confidence intervals that contain the true value of their corresponding parameter. In other words, if confidence intervals are constructed using a given confidence level in an infinite number of independent experiments, the proportion of those intervals that contain the true value of the parameter will match the confidence level [[31]](#ref31).\n\n<a href=\"https://en.wikipedia.org/wiki/Confidence_interval\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$CI=ACC \\pm 1.96\\times SE_{ACC}$$",
"_____no_output_____"
]
],
[
[
"cm.CI95",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `CI` renamed to `CI95` in <span style=\"color:red;\">version 2.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Bennett's S",
"_____no_output_____"
],
[
"Bennett, Alpert & Goldstein’s S is a statistical measure of inter-rater agreement. It was created by Bennett et al. in 1954.\nBennett et al. suggested adjusting inter-rater reliability to accommodate the percentage of rater agreement that might be expected by chance was a better measure than a simple agreement between raters [[8]](#ref8).\n\n<a href=\"https://en.wikipedia.org/wiki/Bennett,_Alpert,_and_Goldstein%E2%80%99s_S\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$p_c=\\frac{1}{|C|}$$",
"_____no_output_____"
],
[
"$$S=\\frac{ACC_{Overall}-p_c}{1-p_c}$$",
"_____no_output_____"
]
],
[
[
"cm.S",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Scott's Pi",
"_____no_output_____"
],
[
"Scott's pi (named after William A. Scott) is a statistic for measuring inter-rater reliability for nominal data in communication studies. Textual entities are annotated with categories by different annotators, and various measures are used to assess the extent of agreement between the annotators, one of which is Scott's pi. Since automatically annotating text is a popular problem in natural language processing, and the goal is to get the computer program that is being developed to agree with the humans in the annotations it creates, assessing the extent to which humans agree with each other is important for establishing a reasonable upper limit on computer performance [[7]](#ref7).\n\n<a href=\"https://en.wikipedia.org/wiki/Scott%27s_Pi\">Wikipedia page</a>\n\n\nEquals to [Kappa Unbiased](#Kappa-unbiased)",
"_____no_output_____"
],
[
"$$p_c=\\sum_{i=1}^{|C|}(\\frac{TOP_i + P_i}{2\\times POP})^2$$",
"_____no_output_____"
],
[
"$$\\pi=\\frac{ACC_{Overall}-p_c}{1-p_c}$$",
"_____no_output_____"
]
],
[
[
"cm.PI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Gwet's AC1",
"_____no_output_____"
],
[
"AC1 was originally introduced by Gwet in 2001 (Gwet, 2001). The interpretation of AC1 is similar to generalized kappa (Fleiss, 1971), which is used to assess inter-rater reliability when there are multiple raters. Gwet (2002) demonstrated that AC1 can overcome the limitations that kappa is sensitive to trait prevalence and rater's classification probabilities (i.e., marginal probabilities), whereas AC1 provides more robust measure of inter-rater reliability [[6]](#ref6).",
"_____no_output_____"
],
[
"$$\\pi_i=\\frac{TOP_i + P_i}{2\\times POP}$$",
"_____no_output_____"
],
[
"$$p_c=\\frac{1}{|C|-1}\\sum_{i=1}^{|C|}\\Big(\\pi_i\\times (1-\\pi_i)\\Big)$$",
"_____no_output_____"
],
[
"$$AC_1=\\frac{ACC_{Overall}-p_c}{1-p_c}$$",
"_____no_output_____"
]
],
[
[
"cm.AC1",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Reference entropy",
"_____no_output_____"
],
[
"The entropy of the decision problem itself as defined by the counts for the reference. The entropy of a distribution is the average negative log probability of outcomes [[30]](#ref30).",
"_____no_output_____"
],
[
"$$Likelihood_{Reference}=\\frac{P_i}{POP}$$",
"_____no_output_____"
],
[
"$$Entropy_{Reference}=-\\sum_{i=1}^{|C|}Likelihood_{Reference}(i)\\times\\log_{2}{Likelihood_{Reference}(i)}$$",
"_____no_output_____"
],
[
"$$0\\times\\log_{2}{0}\\equiv0$$",
"_____no_output_____"
]
],
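[
[
"A minimal sketch computing the same quantity from the per-class condition-positive counts of the example matrix:\n\n```python\nfrom math import log2\n\np = {'L1': 5, 'L2': 2, 'L3': 5}    # condition-positive counts (P) from the example\npop = 12\nh_ref = -sum((v / pop) * log2(v / pop) for v in p.values() if v)\nprint(h_ref)                        # expected to match cm.ReferenceEntropy (~1.48336)\n```",
"_____no_output_____"
]
],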
[
[
"cm.ReferenceEntropy",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Response entropy",
"_____no_output_____"
],
[
"The entropy of the response distribution. The entropy of a distribution is the average negative log probability of outcomes [[30]](#ref30).",
"_____no_output_____"
],
[
"$$Likelihood_{Response}=\\frac{TOP_i}{POP}$$",
"_____no_output_____"
],
[
"$$Entropy_{Response}=-\\sum_{i=1}^{|C|}Likelihood_{Response}(i)\\times\\log_{2}{Likelihood_{Response}(i)}$$",
"_____no_output_____"
],
[
"$$0\\times\\log_{2}{0}\\equiv0$$",
"_____no_output_____"
]
],
[
[
"cm.ResponseEntropy",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Cross entropy",
"_____no_output_____"
],
[
"The cross-entropy of the response distribution against the reference distribution. The cross-entropy is defined by the negative log probabilities of the response distribution weighted by the reference distribution [[30]](#ref30).\n\n<a href=\"https://en.wikipedia.org/wiki/Cross_entropy\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$Likelihood_{Reference}=\\frac{P_i}{POP}$$",
"_____no_output_____"
],
[
"$$Likelihood_{Response}=\\frac{TOP_i}{POP}$$",
"_____no_output_____"
],
[
"$$Entropy_{Cross}=-\\sum_{i=1}^{|C|}Likelihood_{Reference}(i)\\times\\log_{2}{Likelihood_{Response}(i)}$$",
"_____no_output_____"
],
[
"$$0\\times\\log_{2}{0}\\equiv0$$",
"_____no_output_____"
]
],
[
[
"cm.CrossEntropy",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Joint entropy",
"_____no_output_____"
],
[
"The entropy of the joint reference and response distribution as defined by the underlying matrix [[30]](#ref30).",
"_____no_output_____"
],
[
"$$P^{'}(i,j)=\\frac{Matrix(i,j)}{POP}$$",
"_____no_output_____"
],
[
"$$Entropy_{Joint}=-\\sum_{i=1}^{|C|}\\sum_{j=1}^{|C|}P^{'}(i,j)\\times\\log_{2}{P^{'}(i,j)}$$",
"_____no_output_____"
],
[
"$$0\\times\\log_{2}{0}\\equiv0$$",
"_____no_output_____"
]
],
[
[
"cm.JointEntropy",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Conditional entropy",
"_____no_output_____"
],
[
"The entropy of the distribution of categories in the response given that the reference category was as specified [[30]](#ref30).\n\n<a href=\"https://en.wikipedia.org/wiki/Conditional_entropy\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$P^{'}(j|i)=\\frac{Matrix(j,i)}{P_i}$$",
"_____no_output_____"
],
[
"$$Entropy_{Conditional}=\\sum_{i=1}^{|C|}\\Bigg(Likelihood_{Reference}(i)\\times\\Big(-\\sum_{j=1}^{|C|}P^{'}(j|i)\\times\\log_{2}{P^{'}(j|i)}\\Big)\\Bigg)$$",
"_____no_output_____"
],
[
"$$0\\times\\log_{2}{0}\\equiv0$$",
"_____no_output_____"
]
],
[
[
"cm.ConditionalEntropy",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Kullback-Leibler divergence",
"_____no_output_____"
],
[
"In mathematical statistics, the Kullback–Leibler divergence (also called relative entropy) is a measure of how one probability distribution diverges from a second, expected probability distribution [[11]](#ref11) [[30]](#ref30).\n\n<a href=\"https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$Likelihood_{Response}=\\frac{TOP_i}{POP}$$",
"_____no_output_____"
],
[
"$$Likelihood_{Reference}=\\frac{P_i}{POP}$$",
"_____no_output_____"
],
[
"$$Divergence=-\\sum_{i=1}^{|C|}Likelihood_{Reference}\\times\\log_{2}{\\frac{Likelihood_{Reference}}{Likelihood_{Response}}}$$",
"_____no_output_____"
]
],
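[
[
"A minimal sketch computing the divergence of the reference distribution from the response distribution for the example matrix:\n\n```python\nfrom math import log2\n\nref = [5/12, 2/12, 5/12]     # reference (actual) class distribution from the example\nresp = [3/12, 3/12, 6/12]    # response (predicted) class distribution from the example\nkl = sum(p * log2(p / q) for p, q in zip(ref, resp) if p)\nprint(kl)                    # expected to match cm.KL (~0.09998)\n```",
"_____no_output_____"
]
],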
[
[
"cm.KL",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Mutual information",
"_____no_output_____"
],
[
"Mutual information is defined as Kullback-Leibler divergence, between the product of the individual distributions and the joint distribution.\nMutual information is symmetric. We could also subtract the conditional entropy of the reference given the response from the reference entropy to get the same result [[11]](#ref11) [[30]](#ref30).\n\n<a href=\"https://en.wikipedia.org/wiki/Mutual_information\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$P^{'}(i,j)=\\frac{Matrix(i,j)}{POP}$$",
"_____no_output_____"
],
[
"$$Likelihood_{Reference}=\\frac{P_i}{POP}$$",
"_____no_output_____"
],
[
"$$Likelihood_{Response}=\\frac{TOP_i}{POP}$$",
"_____no_output_____"
],
[
"$$MI=-\\sum_{i=1}^{|C|}\\sum_{j=1}^{|C|}P^{'}(i,j)\\times\\log_{2}\\Big({\\frac{P^{'}(i,j)}{Likelihood_{Reference}(i)\\times Likelihood_{Response}(i) }\\Big)}$$",
"_____no_output_____"
],
[
"$$MI=Entropy_{Response}-Entropy_{Conditional}$$",
"_____no_output_____"
]
],
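[
[
"The second identity above can be checked directly from the already-computed entropies (a one-line sketch on the same object):\n\n```python\nmi = cm.ResponseEntropy - cm.ConditionalEntropy\nprint(mi, cm.MutualInformation)   # both ~0.52421 on the example matrix\n```",
"_____no_output_____"
]
],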
[
[
"cm.MutualInformation",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Goodman & Kruskal's lambda A",
"_____no_output_____"
],
[
"In probability theory and statistics, Goodman & Kruskal's lambda is a measure of proportional reduction in error in cross tabulation analysis [[12]](#ref12).\n\n<a href=\"https://en.wikipedia.org/wiki/Goodman_and_Kruskal%27s_lambda\">Wikipedia page</a>",
"_____no_output_____"
],
[
"$$\\lambda_A=\\frac{\\sum_{j=1}^{|C|}Max\\Big(Matrix(-,j)\\Big)-Max(P)}{POP-Max(P)}$$",
"_____no_output_____"
]
],
[
[
"cm.LambdaA",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Goodman & Kruskal's lambda B",
"_____no_output_____"
],
[
"In probability theory and statistics, Goodman & Kruskal's lambda is a measure of proportional reduction in error in cross tabulation analysis [[13]](#ref13).\n\n<a href=\"https://en.wikipedia.org/wiki/Goodman_and_Kruskal%27s_lambda\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$\\lambda_B=\\frac{\\sum_{i=1}^{|C|}Max\\Big(Matrix(i,-)\\Big)-Max(TOP)}{POP-Max(TOP)}$$",
"_____no_output_____"
]
],
[
[
"cm.LambdaB",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA1 (Landis & Koch's benchmark)",
"_____no_output_____"
],
[
"For more information visit [[1]](#ref1).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Kappa</td>\n <td style=\"text-align:center\">Strength of Agreement</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0 ></td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0 - 0.2</td>\n <td style=\"text-align:center;background-color:orangered;\">Slight</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.2 – 0.4</td>\n <td style=\"text-align:center;background-color:orange;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.4 – 0.6</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.6 – 0.8</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Substantial</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.8 – 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Almost perfect</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA1",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA2 (Fleiss' benchmark)",
"_____no_output_____"
],
[
"For more information visit [[4]](#ref4).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Kappa</td>\n <td style=\"text-align:center\">Strength of Agreement</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.40 ></td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.40 - 0.75</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Intermediate to Good</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">More than 0.75</td>\n <td style=\"text-align:center;background-color:green;\">Excellent</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA2",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA3 (Altman's benchmark)",
"_____no_output_____"
],
[
"For more information visit [[5]](#ref5).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Kappa</td>\n <td style=\"text-align:center\">Strength of Agreement</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.2 ></td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.2 – 0.4</td>\n <td style=\"text-align:center;background-color:orange;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.4 – 0.6</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.6 – 0.8</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Good</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.8 – 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Very Good</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA3",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA4 (Cicchetti's benchmark)",
"_____no_output_____"
],
[
"For more information visit [[9]](#ref9).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Kappa</td>\n <td style=\"text-align:center\">Strength of Agreement</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.40 ></td>\n <td style=\"text-align:center;background-color:red;\">Poor</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.40 – 0.59</td>\n <td style=\"text-align:center;background-color:orange;\">Fair</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.59 – 0.74</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Good</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.74 – 1.00</td>\n <td style=\"text-align:center;background-color:green;\">Excellent</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA4",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.7</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA5 (Cramer's benchmark)",
"_____no_output_____"
],
[
"For more information visit [[47]](#ref47).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Cramer's V</td>\n <td style=\"text-align:center\">Strength of Association</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.1 ></td>\n <td style=\"text-align:center;background-color:red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.1 – 0.2</td>\n <td style=\"text-align:center;background-color:orange;\">Weak</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.2 – 0.4</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.4 – 0.6</td>\n <td style=\"text-align:center;background-color:yellowgreen;\">Relatively Strong</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.6 – 0.8</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Strong</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.8 – 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Very Strong</td>\n </tr>\n \n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA5",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### SOA6 (Matthews's benchmark)",
"_____no_output_____"
],
[
"MCC is a confusion matrix method of calculating the Pearson product-moment correlation coefficient (not to be confused with Pearson's C). Therefore, it has the same interpretation [[2]](#ref2).\n\nFor more information visit [[49]](#ref49).",
"_____no_output_____"
],
[
"<table>\n <tr>\n <td style=\"text-align:center\">Overall MCC</td>\n <td style=\"text-align:center\">Strength of Association</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.3 ></td>\n <td style=\"text-align:center;background-color:red;\">Negligible</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.3 - 0.5</td>\n <td style=\"text-align:center;background-color:orange;\">Weak</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.5 - 0.7</td>\n <td style=\"text-align:center;background-color:yellow;\">Moderate</td>\n </tr>\n <tr>\n <td style=\"text-align:center\"> 0.7 - 0.9</td>\n <td style=\"text-align:center;background-color:lawngreen;\">Strong</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">0.9 - 1.0</td>\n <td style=\"text-align:center;background-color:green;\">Very Strong</td>\n </tr>\n \n</table>",
"_____no_output_____"
]
],
[
[
"cm.SOA6",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : only positive values are considered</li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_ACC",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$ACC_{Overall}=\\frac{\\sum_{i=1}^{|C|}TP_i}{POP}$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_ACC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_RACC",
"_____no_output_____"
],
[
"For more information visit [[24]](#ref24).",
"_____no_output_____"
],
[
"$$RACC_{Overall}=\\sum_{i=1}^{|C|}RACC_i$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_RACC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_RACCU",
"_____no_output_____"
],
[
"For more information visit [[25]](#ref25).",
"_____no_output_____"
],
[
"$$RACCU_{Overall}=\\sum_{i=1}^{|C|}RACCU_i$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_RACCU",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.8.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### PPV_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$PPV_{Micro}=\\frac{\\sum_{i=1}^{|C|}TP_i}{\\sum_{i=1}^{|C|}TP_i+FP_i}$$",
"_____no_output_____"
]
],
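[
[
"Micro averaging pools the raw counts before taking the ratio, whereas the macro variants further below average the per-class ratios. A minimal sketch on the per-class TP/FP counts of the example matrix:\n\n```python\ntp = {'L1': 3, 'L2': 1, 'L3': 3}   # per-class true positives from the example\nfp = {'L1': 0, 'L2': 2, 'L3': 3}   # per-class false positives from the example\nppv_micro = sum(tp.values()) / (sum(tp.values()) + sum(fp.values()))\nppv_macro = sum(tp[c] / (tp[c] + fp[c]) for c in tp) / len(tp)\nprint(ppv_micro, ppv_macro)        # ~0.58333 and ~0.61111 (cm.PPV_Micro, cm.PPV_Macro)\n```",
"_____no_output_____"
]
],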
[
[
"cm.PPV_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### TPR_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TPR_{Micro}=\\frac{\\sum_{i=1}^{|C|}TP_i}{\\sum_{i=1}^{|C|}TP_i+FN_i}$$",
"_____no_output_____"
]
],
[
[
"cm.TPR_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### TNR_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TNR_{Micro}=\\frac{\\sum_{i=1}^{|C|}TN_i}{\\sum_{i=1}^{|C|}TN_i+FP_i}$$",
"_____no_output_____"
]
],
[
[
"cm.TNR_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### FPR_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$FPR_{Micro}=\\frac{\\sum_{i=1}^{|C|}FP_i}{\\sum_{i=1}^{|C|}TN_i+FP_i}$$",
"_____no_output_____"
]
],
[
[
"cm.FPR_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### FNR_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$FNR_{Micro}=\\frac{\\sum_{i=1}^{|C|}FN_i}{\\sum_{i=1}^{|C|}TP_i+FN_i}$$",
"_____no_output_____"
]
],
[
[
"cm.FNR_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### F1_Micro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$F_{1_{Micro}}=2\\frac{\\sum_{i=1}^{|C|}TPR_i\\times PPV_i}{\\sum_{i=1}^{|C|}TPR_i+PPV_i}$$",
"_____no_output_____"
]
],
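[
[
"Equivalently, the micro-averaged F1 is the harmonic mean of the micro-averaged precision and recall already available on the object (a one-line sketch):\n\n```python\nf1_micro = 2 * cm.PPV_Micro * cm.TPR_Micro / (cm.PPV_Micro + cm.TPR_Micro)\nprint(f1_micro, cm.F1_Micro)   # both ~0.58333 on the example matrix\n```",
"_____no_output_____"
]
],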
[
[
"cm.F1_Micro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### PPV_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$PPV_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}\\frac{TP_i}{TP_i+FP_i}$$",
"_____no_output_____"
]
],
[
[
"cm.PPV_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### TPR_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TPR_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}\\frac{TP_i}{TP_i+FN_i}$$",
"_____no_output_____"
]
],
[
[
"cm.TPR_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### TNR_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$TNR_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}\\frac{TN_i}{TN_i+FP_i}$$",
"_____no_output_____"
]
],
[
[
"cm.TNR_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### FPR_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$FPR_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}\\frac{FP_i}{TN_i+FP_i}$$",
"_____no_output_____"
]
],
[
[
"cm.FPR_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### FNR_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$FNR_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}\\frac{FN_i}{TP_i+FN_i}$$",
"_____no_output_____"
]
],
[
[
"cm.FNR_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### F1_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$F_{1_{Macro}}=\\frac{2}{|C|}\\sum_{i=1}^{|C|}\\frac{TPR_i\\times PPV_i}{TPR_i+PPV_i}$$",
"_____no_output_____"
]
],
[
[
"cm.F1_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### ACC_Macro",
"_____no_output_____"
],
[
"For more information visit [[3]](#ref3).",
"_____no_output_____"
],
[
"$$ACC_{Macro}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}{ACC_i}$$",
"_____no_output_____"
]
],
[
[
"cm.ACC_Macro",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_J",
"_____no_output_____"
],
[
"For more information visit [[29]](#ref29).",
"_____no_output_____"
],
[
"$$J_{Mean}=\\frac{1}{|C|}\\sum_{i=1}^{|C|}J_i$$",
"_____no_output_____"
],
[
"$$J_{Sum}=\\sum_{i=1}^{|C|}J_i$$",
"_____no_output_____"
],
[
"$$J_{Overall}=(J_{Sum},J_{Mean})$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_J",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.9</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Hamming loss",
"_____no_output_____"
],
[
"The average Hamming loss or Hamming distance between two sets of samples [[31]](#ref31).",
"_____no_output_____"
],
[
"$$L_{Hamming}=\\frac{1}{POP}\\sum_{i=1}^{POP}1(y_i \\neq \\widehat{y}_i)$$",
"_____no_output_____"
]
],
[
[
"cm.HammingLoss",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Zero-one loss",
"_____no_output_____"
],
[
"Zero-one loss is a common loss function used with classification learning. It assigns $ 0 $ to loss for a correct classification and $ 1 $ for an incorrect classification [[31]](#ref31).",
"_____no_output_____"
],
[
"$$L_{0-1}=\\sum_{i=1}^{POP}1(y_i \\neq \\widehat{y}_i)$$",
"_____no_output_____"
]
],
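[
[
"The two losses are directly related: the Hamming loss is the zero-one loss divided by the population size (POP = 12 in the example):\n\n```python\nprint(cm.ZeroOneLoss / 12, cm.HammingLoss)   # both ~0.41667 on the example matrix\n```",
"_____no_output_____"
]
],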
[
[
"cm.ZeroOneLoss",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.1</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### NIR (No information rate)",
"_____no_output_____"
],
[
"Largest class percentage in the data [[57]](#ref57).",
"_____no_output_____"
],
[
"$$NIR=\\frac{1}{POP}Max(P)$$",
"_____no_output_____"
]
],
[
[
"cm.NIR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### P-Value",
"_____no_output_____"
],
[
"In statistical hypothesis testing, the p-value or probability value is, for a given statistical model, the probability that, when the null hypothesis is true, the statistical summary (such as the absolute value of the sample mean difference between two compared groups) would be greater than or equal to the actual observed results [[31]](#ref31) . \nHere a one-sided binomial test to see if the accuracy is better than the no information rate [[57]](#ref57).\n\n\n\n\n<a href=\"https://en.wikipedia.org/wiki/P-value\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$x=\\sum_{i=1}^{|C|}TP_{i}$$",
"_____no_output_____"
],
[
"$$p=NIR$$",
"_____no_output_____"
],
[
"$$n=POP$$",
"_____no_output_____"
],
[
"$$P-Value_{(ACC > NIR)}=1-\\sum_{i=1}^{x}\\left(\\begin{array}{c}n\\\\ i\\end{array}\\right)p^{i}(1-p)^{n-i}$$",
"_____no_output_____"
]
],
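[
[
"A minimal sketch of the one-sided binomial tail for the example matrix, where 7 of the 12 samples are classified correctly and the NIR is $ 5/12 $:\n\n```python\nfrom math import comb\n\nx, n, p = 7, 12, 5 / 12\np_value = sum(comb(n, i) * p ** i * (1 - p) ** (n - i) for i in range(x, n + 1))\nprint(p_value)   # expected to match cm.PValue (~0.18926)\n```",
"_____no_output_____"
]
],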
[
[
"cm.PValue",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.2</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_CEN",
"_____no_output_____"
],
[
"For more information visit [[17]](#ref17).",
"_____no_output_____"
],
[
"$$P_j=\\frac{\\sum_{k=1}^{|C|}\\Big(Matrix(j,k)+Matrix(k,j)\\Big)}{2\\sum_{k,l=1}^{|C|}Matrix(k,l)}$$",
"_____no_output_____"
],
[
"$$CEN_{Overall}=\\sum_{j=1}^{|C|}P_jCEN_j$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_CEN",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_MCEN",
"_____no_output_____"
],
[
"For more information visit [[19]](#ref19).",
"_____no_output_____"
],
[
"$$\\alpha=\\begin{cases}1 & |C| > 2\\\\0 & |C| = 2\\end{cases}$$",
"_____no_output_____"
],
[
"$$P_j=\\frac{\\sum_{k=1}^{|C|}\\Big(Matrix(j,k)+Matrix(k,j)\\Big)-Matrix(j,j)}{2\\sum_{k,l=1}^{|C|}Matrix(k,l)-\\alpha \\sum_{k=1}^{|C|}Matrix(k,k)}$$",
"_____no_output_____"
],
[
"$$MCEN_{Overall}=\\sum_{j=1}^{|C|}P_jMCEN_j$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_MCEN",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.3</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Overall_MCC",
"_____no_output_____"
],
[
"For more information visit [[20]](#ref20) [[27]](#ref27).\n\n<a href=\"#SOA6-(Matthews's-benchmark)\">Benchmark</a>",
"_____no_output_____"
],
[
"$$MCC_{Overall}=\\frac{cov(X,Y)}{\\sqrt{cov(X,X)\\times cov(Y,Y)}}$$",
"_____no_output_____"
],
[
"$$cov(X,Y)=\\sum_{i,j,k=1}^{|C|}\\Big(Matrix(i,i)Matrix(k,j)-Matrix(j,i)Matrix(i,k)\\Big)$$",
"_____no_output_____"
],
[
"$$cov(X,X) = \\sum_{i=1}^{|C|}\\Bigg[\\Big(\\sum_{j=1}^{|C|}Matrix(j,i)\\Big)\\Big(\\sum_{k,l=1,k\\neq i}^{|C|}Matrix(l,k)\\Big)\\Bigg]$$",
"_____no_output_____"
],
[
"$$cov(Y,Y) = \\sum_{i=1}^{|C|}\\Bigg[\\Big(\\sum_{j=1}^{|C|}Matrix(i,j)\\Big)\\Big(\\sum_{k,l=1,k\\neq i}^{|C|}Matrix(k,l)\\Big)\\Bigg]$$",
"_____no_output_____"
]
],
[
[
"cm.Overall_MCC",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### RR (Global performance index)",
"_____no_output_____"
],
[
"For more information visit [[21]](#ref21).",
"_____no_output_____"
],
[
"$$RR=\\frac{1}{|C|}\\sum_{i,j=1}^{|C|}Matrix(i,j)$$",
"_____no_output_____"
]
],
[
[
"cm.RR",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### CBA (Class balance accuracy)",
"_____no_output_____"
],
[
"As an evaluation tool, CBA creates an overall assessment of\nmodel predictive power by scrutinizing measures simultaneously across each class in a conservative manner that guarantees that a model’s ability to recall observations from each class and\nits ability to do so efficiently won’t fall below the bound [[22]](#ref22) [[51]](#ref51).",
"_____no_output_____"
],
[
"$$CBA=\\frac{\\sum_{i=1}^{|C|}\\frac{Matrix(i,i)}{Max(TOP_i,P_i)}}{|C|}$$",
"_____no_output_____"
]
],
[
[
"cm.CBA",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AUNU",
"_____no_output_____"
],
[
"When dealing with multiclass problems, a global measure of classification performances based on the ROC approach (AUNU) has been proposed as the average of single-class measures [[23]](#ref23).",
"_____no_output_____"
],
[
"$$AUNU=\\frac{\\sum_{i=1}^{|C|}AUC_i}{|C|}$$",
"_____no_output_____"
]
],
[
[
"cm.AUNU",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### AUNP",
"_____no_output_____"
],
[
"Another option (AUNP) is that of averaging the $ AUC_i $ values with weights proportional to the number of samples experimentally belonging to each class, that is, the a priori class distribution [[23]](#ref23).",
"_____no_output_____"
],
[
"$$AUNP=\\sum_{i=1}^{|C|}\\frac{P_i}{POP}AUC_i$$",
"_____no_output_____"
]
],
[
[
"cm.AUNP",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### RCI (Relative classifier information)",
"_____no_output_____"
],
[
"Performance of different classifiers on the same domain can be measured by\ncomparing relative classifier information while classifier information (mutual information) can be used for comparison across different decision problems [[32]](#ref32) [[22]](#ref22).",
"_____no_output_____"
],
[
"$$H_d=-\\sum_{i=1}^{|C|}\\Big(\\frac{\\sum_{l=1}^{|C|}Matrix(i,l)}{\\sum_{h,k=1}^{|C|}Matrix(h,k)}log_2\\frac{\\sum_{l=1}^{|C|}Matrix(i,l)}{\\sum_{h,k=1}^{|C|}Matrix(h,k)}\\Big)=Entropy_{Reference}$$",
"_____no_output_____"
],
[
"$$H_o=\\sum_{j=1}^{|C|}\\Big(\\frac{\\sum_{k=1}^{|C|}Matrix(k,j)}{\\sum_{h,l=0}^{|C|}Matrix(h,l)}H_{oj}\\Big)=Entropy_{Conditional}$$",
"_____no_output_____"
],
[
"$$H_{oj}=-\\sum_{i=1}^{|C|}\\Big(\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}Matrix(k,j)}log_2\\frac{Matrix(i,j)}{\\sum_{k=1}^{|C|}Matrix(k,j)}\\Big)$$",
"_____no_output_____"
],
[
"$$RCI=\\frac{H_d-H_o}{H_d}=\\frac{MI}{Entropy_{Reference}}$$",
"_____no_output_____"
]
],
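[
[
"The last identity can be checked directly on the example object (a one-line sketch):\n\n```python\nprint(cm.MutualInformation / cm.ReferenceEntropy, cm.RCI)   # both ~0.35339\n```",
"_____no_output_____"
]
],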
[
[
"cm.RCI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 1.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Pearson's C",
"_____no_output_____"
],
[
"The contingency coefficient is a coefficient of association that tells whether two variables or data sets are independent or dependent of/on each other. It is also known as Pearson’s coefficient (not to be confused with Pearson’s coefficient of skewness) [[43]](#ref43) [[44]](#ref44).",
"_____no_output_____"
],
[
"$$C=\\sqrt{\\frac{\\chi^2}{\\chi^2+POP}}$$",
"_____no_output_____"
]
],
[
[
"cm.C",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### CSI (Classification success index)",
"_____no_output_____"
],
[
"The Classification Success Index (CSI) is an overall\nmeasure defined by averaging ICSI over all classes [[58]](#ref58).",
"_____no_output_____"
],
[
"$$CSI=\\frac{1}{|C|}\\sum_{i=1}^{|C|}{ICSI_i}$$",
"_____no_output_____"
]
],
[
[
"cm.CSI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"### ARI (Adjusted Rand index)",
"_____no_output_____"
],
[
"The Rand index or Rand measure (named after William M. Rand) in statistics, and in particular in data clustering, is a measure of the similarity between two data clusterings. A form of the Rand index may be defined that is adjusted for the chance grouping of elements, this is the adjusted Rand index. From a mathematical standpoint, Rand index is related to the accuracy, but is applicable even when class labels are not used [[68]](#ref68).\n\nThe Adjusted Rand Index (ARI) is frequently used in cluster validation since it is a measure of agreement between two partitions: one given by the clustering process and the other defined by external criteria, but it can also be used in supervised learning [[69]](#ref69).\n\n<a href=\"https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index\">Wikipedia Page</a>",
"_____no_output_____"
],
[
"$$X=\\frac{\\sum_{i}C_{2}^{P_i}\\times \\sum_{j}C_{2}^{TOP_j}}{C_2^{POP}}$$",
"_____no_output_____"
],
[
"$$ARI=\\frac{\\sum_{i,j}C_{2}^{Matrix(i,j)}-X}{\\frac{1}{2}[\\sum_{i}C_{2}^{P_i} + \\sum_{j}C_{2}^{TOP_j}]-X}$$",
"_____no_output_____"
]
],
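[
[
"A hand-rolled sketch of the same computation on the example matrix, using `math.comb` for the pair counts:\n\n```python\nfrom math import comb\n\nmatrix = {'L1': {'L1': 3, 'L2': 0, 'L3': 2},\n          'L2': {'L1': 0, 'L2': 1, 'L3': 1},\n          'L3': {'L1': 0, 'L2': 2, 'L3': 3}}\npop = 12\np = {c: sum(r.values()) for c, r in matrix.items()}             # actual totals\ntop = {c: sum(matrix[r][c] for r in matrix) for c in matrix}    # predicted totals\nsum_cells = sum(comb(v, 2) for r in matrix.values() for v in r.values())\nsum_p = sum(comb(v, 2) for v in p.values())\nsum_top = sum(comb(v, 2) for v in top.values())\nx = sum_p * sum_top / comb(pop, 2)\nari = (sum_cells - x) / (0.5 * (sum_p + sum_top) - x)\nprint(ari)   # expected to match cm.ARI (~0.09206)\n```",
"_____no_output_____"
]
],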
[
[
"cm.ARI",
"_____no_output_____"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : $ C_{r}^{n} $ is the number of combinations of $ n $ objects taken $ r $</li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"## Print",
"_____no_output_____"
],
[
"### Full",
"_____no_output_____"
]
],
[
[
"print(cm)",
"Predict L1 L2 L3 \nActual\nL1 3 0 2 \n\nL2 0 1 1 \n\nL3 0 2 3 \n\n\n\n\n\nOverall Statistics : \n\n95% CI (0.30439,0.86228)\nACC Macro 0.72222\nARI 0.09206\nAUNP 0.68571\nAUNU 0.67857\nBennett S 0.375\nCBA 0.47778\nCSI 0.17778\nChi-Squared 6.6\nChi-Squared DF 4\nConditional Entropy 0.97579\nCramer V 0.5244\nCross Entropy 1.58333\nF1 Macro 0.56515\nF1 Micro 0.58333\nFNR Macro 0.43333\nFNR Micro 0.41667\nFPR Macro 0.20952\nFPR Micro 0.20833\nGwet AC1 0.38931\nHamming Loss 0.41667\nJoint Entropy 2.45915\nKL Divergence 0.09998\nKappa 0.35484\nKappa 95% CI (-0.07708,0.78675)\nKappa No Prevalence 0.16667\nKappa Standard Error 0.22036\nKappa Unbiased 0.34426\nLambda A 0.42857\nLambda B 0.16667\nMutual Information 0.52421\nNIR 0.41667\nOverall ACC 0.58333\nOverall CEN 0.46381\nOverall J (1.225,0.40833)\nOverall MCC 0.36667\nOverall MCEN 0.51894\nOverall RACC 0.35417\nOverall RACCU 0.36458\nP-Value 0.18926\nPPV Macro 0.61111\nPPV Micro 0.58333\nPearson C 0.59568\nPhi-Squared 0.55\nRCI 0.35339\nRR 4.0\nReference Entropy 1.48336\nResponse Entropy 1.5\nSOA1(Landis & Koch) Fair\nSOA2(Fleiss) Poor\nSOA3(Altman) Fair\nSOA4(Cicchetti) Poor\nSOA5(Cramer) Relatively Strong\nSOA6(Matthews) Weak\nScott PI 0.34426\nStandard Error 0.14232\nTNR Macro 0.79048\nTNR Micro 0.79167\nTPR Macro 0.56667\nTPR Micro 0.58333\nZero-one Loss 5\n\nClass Statistics :\n\nClasses L1 L2 L3 \nACC(Accuracy) 0.83333 0.75 0.58333 \nAGF(Adjusted F-score) 0.72859 0.62869 0.61009 \nAGM(Adjusted geometric mean) 0.85764 0.70861 0.58034 \nAM(Difference between automatic and manual classification) -2 1 1 \nAUC(Area under the ROC curve) 0.8 0.65 0.58571 \nAUCI(AUC value interpretation) Very Good Fair Poor \nAUPR(Area under the PR curve) 0.8 0.41667 0.55 \nBCD(Bray-Curtis dissimilarity) 0.08333 0.04167 0.04167 \nBM(Informedness or bookmaker informedness) 0.6 0.3 0.17143 \nCEN(Confusion entropy) 0.25 0.49658 0.60442 \nDOR(Diagnostic odds ratio) None 4.0 2.0 \nDP(Discriminant power) None 0.33193 0.16597 \nDPI(Discriminant power interpretation) None Poor Poor \nERR(Error rate) 0.16667 0.25 0.41667 \nF0.5(F0.5 score) 0.88235 0.35714 0.51724 \nF1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545 \nF2(F2 score) 0.65217 0.45455 0.57692 \nFDR(False discovery rate) 0.0 0.66667 0.5 \nFN(False negative/miss/type 2 error) 2 1 2 \nFNR(Miss rate or false negative rate) 0.4 0.5 0.4 \nFOR(False omission rate) 0.22222 0.11111 0.33333 \nFP(False positive/type 1 error/false alarm) 0 2 3 \nFPR(Fall-out or false positive rate) 0.0 0.2 0.42857 \nG(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772 \nGI(Gini index) 0.6 0.3 0.17143 \nGM(G-mean geometric mean of specificity and sensitivity) 0.7746 0.63246 0.58554 \nIBA(Index of balanced accuracy) 0.36 0.28 0.35265 \nICSI(Individual classification success index) 0.6 -0.16667 0.1 \nIS(Information score) 1.26303 1.0 0.26303 \nJ(Jaccard index) 0.6 0.25 0.375 \nLS(Lift score) 2.4 2.0 1.2 \nMCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903 \nMCCI(Matthews correlation coefficient interpretation) Moderate Negligible Negligible \nMCEN(Modified confusion entropy) 0.26439 0.5 0.6875 \nMK(Markedness) 0.77778 0.22222 0.16667 \nN(Condition negative) 7 10 7 \nNLR(Negative likelihood ratio) 0.4 0.625 0.7 \nNLRI(Negative likelihood ratio interpretation) Poor Negligible Negligible \nNPV(Negative predictive value) 0.77778 0.88889 0.66667 \nOC(Overlap coefficient) 1.0 0.5 0.6 \nOOC(Otsuka-Ochiai coefficient) 0.7746 0.40825 0.54772 \nOP(Optimized precision) 0.58333 0.51923 
0.55894 \nP(Condition positive or support) 5 2 5 \nPLR(Positive likelihood ratio) None 2.5 1.4 \nPLRI(Positive likelihood ratio interpretation) None Poor Poor \nPOP(Population) 12 12 12 \nPPV(Precision or positive predictive value) 1.0 0.33333 0.5 \nPRE(Prevalence) 0.41667 0.16667 0.41667 \nQ(Yule Q - coefficient of colligation) None 0.6 0.33333 \nQI(Yule Q interpretation) None Moderate Weak \nRACC(Random accuracy) 0.10417 0.04167 0.20833 \nRACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007 \nTN(True negative/correct rejection) 7 8 4 \nTNR(Specificity or true negative rate) 1.0 0.8 0.57143 \nTON(Test outcome negative) 9 9 6 \nTOP(Test outcome positive) 3 3 6 \nTP(True positive/hit) 3 1 3 \nTPR(Sensitivity, recall, hit rate, or true positive rate) 0.6 0.5 0.6 \nY(Youden index) 0.6 0.3 0.17143 \ndInd(Distance index) 0.4 0.53852 0.58624 \nsInd(Similarity index) 0.71716 0.61921 0.58547 \n\n"
]
],
[
[
"### Matrix",
"_____no_output_____"
]
],
[
[
"cm.print_matrix()",
"Predict L1 L2 L3 \nActual\nL1 3 0 2 \n\nL2 0 1 1 \n\nL3 0 2 3 \n\n\n"
],
[
"cm.matrix",
"_____no_output_____"
],
[
"cm.print_matrix(one_vs_all=True,class_name = \"L1\")",
"Predict L1 ~ \nActual\nL1 3 2 \n\n~ 0 7 \n\n\n"
],
[
"sparse_cm = ConfusionMatrix(matrix={1:{1:0,2:2},2:{1:0,2:18}})",
"_____no_output_____"
],
[
"sparse_cm.print_matrix(sparse=True)",
"Predict 2 \nActual\n1 2 \n\n2 18 \n\n\n"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `one_vs_all` : One-Vs-All mode flag (type : `bool`, default : `False`)\n2. `class_name` : target class name for One-Vs-All mode (type : `any valid type`, default : `None`)\n3. `sparse` : sparse mode printing flag (type : `bool`, default : `False`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `one_vs_all` option, new in <span style=\"color:red;\">version 1.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `matrix()` renamed to `print_matrix()` and `matrix` return confusion matrix as `dict` in <span style=\"color:red;\">version 1.5</span></li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `sparse` option, new in <span style=\"color:red;\">version 2.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Normalized matrix",
"_____no_output_____"
]
],
[
[
"cm.print_normalized_matrix()",
"Predict L1 L2 L3 \nActual\nL1 0.6 0.0 0.4 \n\nL2 0.0 0.5 0.5 \n\nL3 0.0 0.4 0.6 \n\n\n"
],
[
"cm.normalized_matrix",
"_____no_output_____"
],
[
"cm.print_normalized_matrix(one_vs_all=True,class_name = \"L1\")",
"Predict L1 ~ \nActual\nL1 0.6 0.4 \n\n~ 0.0 1.0 \n\n\n"
],
[
"sparse_cm.print_normalized_matrix(sparse=True)",
"Predict 2 \nActual\n1 1.0 \n\n2 1.0 \n\n\n"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `one_vs_all` : One-Vs-All mode flag (type : `bool`, default : `False`)\n2. `class_name` : target class name for One-Vs-All mode (type : `any valid type`, default : `None`)\n3. `sparse` : sparse mode printing flag (type : `bool`, default : `False`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `one_vs_all` option, new in <span style=\"color:red;\">version 1.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `normalized_matrix()` renamed to `print_normalized_matrix()` and `normalized_matrix` return normalized confusion matrix as `dict` in <span style=\"color:red;\">version 1.5</span></li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `sparse` option, new in <span style=\"color:red;\">version 2.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Stat",
"_____no_output_____"
]
],
[
[
"cm.stat()",
"Overall Statistics : \n\n95% CI (0.30439,0.86228)\nACC Macro 0.72222\nARI 0.09206\nAUNP 0.68571\nAUNU 0.67857\nBennett S 0.375\nCBA 0.47778\nCSI 0.17778\nChi-Squared 6.6\nChi-Squared DF 4\nConditional Entropy 0.97579\nCramer V 0.5244\nCross Entropy 1.58333\nF1 Macro 0.56515\nF1 Micro 0.58333\nFNR Macro 0.43333\nFNR Micro 0.41667\nFPR Macro 0.20952\nFPR Micro 0.20833\nGwet AC1 0.38931\nHamming Loss 0.41667\nJoint Entropy 2.45915\nKL Divergence 0.09998\nKappa 0.35484\nKappa 95% CI (-0.07708,0.78675)\nKappa No Prevalence 0.16667\nKappa Standard Error 0.22036\nKappa Unbiased 0.34426\nLambda A 0.42857\nLambda B 0.16667\nMutual Information 0.52421\nNIR 0.41667\nOverall ACC 0.58333\nOverall CEN 0.46381\nOverall J (1.225,0.40833)\nOverall MCC 0.36667\nOverall MCEN 0.51894\nOverall RACC 0.35417\nOverall RACCU 0.36458\nP-Value 0.18926\nPPV Macro 0.61111\nPPV Micro 0.58333\nPearson C 0.59568\nPhi-Squared 0.55\nRCI 0.35339\nRR 4.0\nReference Entropy 1.48336\nResponse Entropy 1.5\nSOA1(Landis & Koch) Fair\nSOA2(Fleiss) Poor\nSOA3(Altman) Fair\nSOA4(Cicchetti) Poor\nSOA5(Cramer) Relatively Strong\nSOA6(Matthews) Weak\nScott PI 0.34426\nStandard Error 0.14232\nTNR Macro 0.79048\nTNR Micro 0.79167\nTPR Macro 0.56667\nTPR Micro 0.58333\nZero-one Loss 5\n\nClass Statistics :\n\nClasses L1 L2 L3 \nACC(Accuracy) 0.83333 0.75 0.58333 \nAGF(Adjusted F-score) 0.72859 0.62869 0.61009 \nAGM(Adjusted geometric mean) 0.85764 0.70861 0.58034 \nAM(Difference between automatic and manual classification) -2 1 1 \nAUC(Area under the ROC curve) 0.8 0.65 0.58571 \nAUCI(AUC value interpretation) Very Good Fair Poor \nAUPR(Area under the PR curve) 0.8 0.41667 0.55 \nBCD(Bray-Curtis dissimilarity) 0.08333 0.04167 0.04167 \nBM(Informedness or bookmaker informedness) 0.6 0.3 0.17143 \nCEN(Confusion entropy) 0.25 0.49658 0.60442 \nDOR(Diagnostic odds ratio) None 4.0 2.0 \nDP(Discriminant power) None 0.33193 0.16597 \nDPI(Discriminant power interpretation) None Poor Poor \nERR(Error rate) 0.16667 0.25 0.41667 \nF0.5(F0.5 score) 0.88235 0.35714 0.51724 \nF1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545 \nF2(F2 score) 0.65217 0.45455 0.57692 \nFDR(False discovery rate) 0.0 0.66667 0.5 \nFN(False negative/miss/type 2 error) 2 1 2 \nFNR(Miss rate or false negative rate) 0.4 0.5 0.4 \nFOR(False omission rate) 0.22222 0.11111 0.33333 \nFP(False positive/type 1 error/false alarm) 0 2 3 \nFPR(Fall-out or false positive rate) 0.0 0.2 0.42857 \nG(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772 \nGI(Gini index) 0.6 0.3 0.17143 \nGM(G-mean geometric mean of specificity and sensitivity) 0.7746 0.63246 0.58554 \nIBA(Index of balanced accuracy) 0.36 0.28 0.35265 \nICSI(Individual classification success index) 0.6 -0.16667 0.1 \nIS(Information score) 1.26303 1.0 0.26303 \nJ(Jaccard index) 0.6 0.25 0.375 \nLS(Lift score) 2.4 2.0 1.2 \nMCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903 \nMCCI(Matthews correlation coefficient interpretation) Moderate Negligible Negligible \nMCEN(Modified confusion entropy) 0.26439 0.5 0.6875 \nMK(Markedness) 0.77778 0.22222 0.16667 \nN(Condition negative) 7 10 7 \nNLR(Negative likelihood ratio) 0.4 0.625 0.7 \nNLRI(Negative likelihood ratio interpretation) Poor Negligible Negligible \nNPV(Negative predictive value) 0.77778 0.88889 0.66667 \nOC(Overlap coefficient) 1.0 0.5 0.6 \nOOC(Otsuka-Ochiai coefficient) 0.7746 0.40825 0.54772 \nOP(Optimized precision) 0.58333 0.51923 0.55894 \nP(Condition positive or support) 5 2 5 \nPLR(Positive likelihood 
ratio) None 2.5 1.4 \nPLRI(Positive likelihood ratio interpretation) None Poor Poor \nPOP(Population) 12 12 12 \nPPV(Precision or positive predictive value) 1.0 0.33333 0.5 \nPRE(Prevalence) 0.41667 0.16667 0.41667 \nQ(Yule Q - coefficient of colligation) None 0.6 0.33333 \nQI(Yule Q interpretation) None Moderate Weak \nRACC(Random accuracy) 0.10417 0.04167 0.20833 \nRACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007 \nTN(True negative/correct rejection) 7 8 4 \nTNR(Specificity or true negative rate) 1.0 0.8 0.57143 \nTON(Test outcome negative) 9 9 6 \nTOP(Test outcome positive) 3 3 6 \nTP(True positive/hit) 3 1 3 \nTPR(Sensitivity, recall, hit rate, or true positive rate) 0.6 0.5 0.6 \nY(Youden index) 0.6 0.3 0.17143 \ndInd(Distance index) 0.4 0.53852 0.58624 \nsInd(Similarity index) 0.71716 0.61921 0.58547 \n\n"
],
[
"cm.stat(overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"])",
"Overall Statistics : \n\nKappa 0.35484\n\nClass Statistics :\n\nClasses L1 L2 L3 \nACC(Accuracy) 0.83333 0.75 0.58333 \nAUC(Area under the ROC curve) 0.8 0.65 0.58571 \nTPR(Sensitivity, recall, hit rate, or true positive rate) 0.6 0.5 0.6 \n\n"
],
[
"cm.stat(overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"],class_name=[\"L1\",\"L3\"])",
"Overall Statistics : \n\nKappa 0.35484\n\nClass Statistics :\n\nClasses L1 L3 \nACC(Accuracy) 0.83333 0.58333 \nAUC(Area under the ROC curve) 0.8 0.58571 \nTPR(Sensitivity, recall, hit rate, or true positive rate) 0.6 0.6 \n\n"
],
[
"cm.stat(summary=True)",
"Overall Statistics : \n\nACC Macro 0.72222\nF1 Macro 0.56515\nFPR Macro 0.20952\nKappa 0.35484\nOverall ACC 0.58333\nPPV Macro 0.61111\nSOA1(Landis & Koch) Fair\nTPR Macro 0.56667\nZero-one Loss 5\n\nClass Statistics :\n\nClasses L1 L2 L3 \nACC(Accuracy) 0.83333 0.75 0.58333 \nAUC(Area under the ROC curve) 0.8 0.65 0.58571 \nAUCI(AUC value interpretation) Very Good Fair Poor \nF1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545 \nFN(False negative/miss/type 2 error) 2 1 2 \nFP(False positive/type 1 error/false alarm) 0 2 3 \nFPR(Fall-out or false positive rate) 0.0 0.2 0.42857 \nN(Condition negative) 7 10 7 \nP(Condition positive or support) 5 2 5 \nPOP(Population) 12 12 12 \nPPV(Precision or positive predictive value) 1.0 0.33333 0.5 \nTN(True negative/correct rejection) 7 8 4 \nTON(Test outcome negative) 9 9 6 \nTOP(Test outcome positive) 3 3 6 \nTP(True positive/hit) 3 1 3 \nTPR(Sensitivity, recall, hit rate, or true positive rate) 0.6 0.5 0.6 \n\n"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `overall_param` : overall statistics names for print (type : `list`, default : `None`)\n2. `class_param` : class statistics names for print (type : `list`, default : `None`)\n3. `class_name` : class names for print (subset of classes) (type : `list`, default : `None`)\n4. `summary` : summary mode flag (type : `bool`, default : `False`)\n",
"_____no_output_____"
],
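[
"The filters above can also be combined in a single call. The cell below is an illustrative sketch only (not executed here); `Kappa`, `Overall ACC`, `F1` and the class `L2` are example choices taken from the tables above, and any other available statistic or class names may be used instead.",
"_____no_output_____"
],
[
"cm.stat(overall_param=[\"Kappa\",\"Overall ACC\"],class_param=[\"F1\"],class_name=[\"L2\"])",
"_____no_output_____"
],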
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `cm.params()` in prev versions (0.2 >) </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `overall_param` & `class_param` , new in <span style=\"color:red;\">version 1.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `class_name` , new in <span style=\"color:red;\">version 1.7 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `summary` , new in <span style=\"color:red;\">version 2.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### Compare report",
"_____no_output_____"
]
],
[
[
"cp.print_report()",
"Best : cm2\n\nRank Name Class-Score Overall-Score\n1 cm2 9.05 2.55\n2 cm3 6.05 1.98333\n\n"
],
[
"print(cp)",
"Best : cm2\n\nRank Name Class-Score Overall-Score\n1 cm2 9.05 2.55\n2 cm3 6.05 1.98333\n\n"
]
],
[
[
"## Save",
"_____no_output_____"
]
],
[
[
"import os\nif \"Document_Files\" not in os.listdir():\n os.mkdir(\"Document_Files\")",
"_____no_output_____"
]
],
[
[
"### .pycm file",
"_____no_output_____"
]
],
[
[
"cm.save_stat(os.path.join(\"Document_Files\",\"cm1\"))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1.pycm\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_stat(os.path.join(\"Document_Files\",\"cm1_filtered\"),overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered.pycm\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_stat(os.path.join(\"Document_Files\",\"cm1_filtered2\"),overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"],class_name=[\"L1\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered2.pycm\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_stat(os.path.join(\"Document_Files\",\"cm1_summary\"),summary=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_summary.pycm\">Open File</a>",
"_____no_output_____"
]
],
[
[
"sparse_cm.save_stat(os.path.join(\"Document_Files\",\"sparse_cm\"),summary=True,sparse=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\sparse_cm.pycm\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_stat(\"cm1asdasd/\")",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `name` : output file name (type : `str`)\n2. `address` : flag for address return (type : `bool`, default : `True`)\n3. `overall_param` : overall statistics names for save (type : `list`, default : `None`)\n4. `class_param` : class statistics names for save (type : `list`, default : `None`)\n5. `class_name` : class names for print (subset of classes) (type : `list`, default : `None`)\n6. `summary` : summary mode flag (type : `bool`, default : `False`)\n7. `sparse` : sparse mode printing flag (type : `bool`, default : `False`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.4</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `overall_param` & `class_param` , new in <span style=\"color:red;\">version 1.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `class_name` , new in <span style=\"color:red;\">version 1.7 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `summary` , new in <span style=\"color:red;\">version 2.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `sparse`, new in <span style=\"color:red;\">version 2.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### HTML",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1\"))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_filtered\"),overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_filtered2\"),overall_param=[\"Kappa\"],class_param=[\"ACC\",\"AUC\",\"TPR\"],class_name=[\"L1\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered2.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_colored\"),color=(255, 204, 255))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_colored.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_colored2\"),color=\"Crimson\")",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_colored2.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_normalized\"),color=\"Crimson\",normalize=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_normalized.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_summary\"),summary=True,normalize=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_summary.html\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_html(\"cm1asdasd/\")",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `name` : output file name (type : `str`)\n2. `address` : flag for address return (type : `bool`, default : `True`)\n3. `overall_param` : overall statistics names for save (type : `list`, default : `None`)\n4. `class_param` : class statistics names for save (type : `list`, default : `None`)\n5. `class_name` : class names for print (subset of classes) (type : `list`, default : `None`)\n6. `color` : matrix color (R,G,B) (type : `tuple`/`str`, default : `(0,0,0)`), supports <a href=\"https://en.wikipedia.org/wiki/X11_color_names\">X11 color names</a>\n7. `normalize` : save normalized matrix flag (type : `bool`, default : `False`)\n8. `summary` : summary mode flag (type : `bool`, default : `False`)\n9. `alt_link` : alternative link for document flag (type : `bool`, default : `False`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `overall_param` & `class_param` , new in <span style=\"color:red;\">version 1.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `class_name` , new in <span style=\"color:red;\">version 1.7 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `color`, new in <span style=\"color:red;\">version 1.8 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `normalize`, new in <span style=\"color:red;\">version 2.0 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `summary` and `alt_link` , new in <span style=\"color:red;\">version 2.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : If PyCM website is not available, set `alt_link = True`</li>\n</ul>",
"_____no_output_____"
],
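[
"As an illustrative sketch only (not executed here), `alt_link` can be combined with the other options shown above; the output name `cm1_alt_link` is an arbitrary example.",
"_____no_output_____"
],
[
"cm.save_html(os.path.join(\"Document_Files\",\"cm1_alt_link\"),summary=True,alt_link=True)",
"_____no_output_____"
],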
[
"### CSV",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1\"))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1.csv\">Open Stat File</a>\n<br/>\n<a href=\"Document_Files\\cm1_matrix.csv\">Open Matrix File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1_filtered\"),class_param=[\"ACC\",\"AUC\",\"TPR\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered.csv\">Open Stat File</a>\n<br/>\n<a href=\"Document_Files\\cm1_filtered_matrix.csv\">Open Matrix File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1_filtered2\"),class_param=[\"ACC\",\"AUC\",\"TPR\"],normalize=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered2.csv\">Open Stat File</a>\n<br/>\n<a href=\"Document_Files\\cm1_filtered2_matrix.csv\">Open Matrix File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1_filtered3\"),class_param=[\"ACC\",\"AUC\",\"TPR\"],class_name=[\"L1\"])",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_filtered3.csv\">Open Stat File</a>\n<br/>\n<a href=\"Document_Files\\cm1_filtered3_matrix.csv\">Open Matrix File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1_header\"),header=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_header.csv\">Open Stat File</a>\n<br/>\n<a href=\"Document_Files\\cm1_header_matrix.csv\">Open Matrix File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(os.path.join(\"Document_Files\",\"cm1_summary\"),summary=True,matrix_save=False)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_summary.csv\">Open Stat File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_csv(\"cm1asdasd/\")",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `name` : output file name (type : `str`)\n2. `address` : flag for address return (type : `bool`, default : `True`)\n3. `class_param` : class statistics names for save (type : `list`, default : `None`)\n4. `class_name` : class names for print (subset of classes) (type : `list`, default : `None`)\n5. `matrix_save` : flag for saving matrix in separate CSV file (type : `bool`, default : `True`)\n6. `normalize` : flag for saving normalized matrix instead of matrix (type : `bool`, default : `False`)\n7. `summary` : summary mode flag (type : `bool`, default : `False`)\n8. `header` : flag for adding header to matrix CSV file (type : `bool`, default : `False`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.6</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `class_param` , new in <span style=\"color:red;\">version 1.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `class_name` , new in <span style=\"color:red;\">version 1.7 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `matrix_save` and `normalize`, new in <span style=\"color:red;\">version 1.9 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `summary` , new in <span style=\"color:red;\">version 2.4 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `header` , new in <span style=\"color:red;\">version 2.6 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"### OBJ",
"_____no_output_____"
]
],
[
[
"cm.save_obj(os.path.join(\"Document_Files\",\"cm1\"))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1.obj\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_obj(os.path.join(\"Document_Files\",\"cm1_stat\"),save_stat=True)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_stat.obj\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_obj(os.path.join(\"Document_Files\",\"cm1_no_vectors\"),save_vector=False)",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cm1_no_vectors.obj\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cm.save_obj(\"cm1asdasd/\")",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `name` : output file name (type : `str`)\n2. `address` : flag for address return (type : `bool`, default : `True`)\n3. `save_stat` : save statistics flag (type : `bool`, default : `False`)\n4. `save_vector` : save vectors flag (type : `bool`, default : `True`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 0.9.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : `save_vector` and `save_stat`, new in <span style=\"color:red;\">version 2.3 </span> </li>\n</ul>",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : For more information visit <a href=\"#Example-4-(File)\">Example 4</a></li>\n</ul>",
"_____no_output_____"
],
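[
"A saved `.obj` file can later be restored through the `ConfusionMatrix` constructor. The cell below is an illustrative sketch only (not executed here); it assumes the constructor's `file` keyword argument and the `cm1.obj` file saved above (see Example 4 below for the full workflow).",
"_____no_output_____"
],
[
"cm_loaded=ConfusionMatrix(file=open(os.path.join(\"Document_Files\",\"cm1.obj\"),\"r\"))",
"_____no_output_____"
],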
[
"### comp",
"_____no_output_____"
]
],
[
[
"cp.save_report(os.path.join(\"Document_Files\",\"cp\"))",
"_____no_output_____"
]
],
[
[
"<a href=\"Document_Files\\cp.comp\">Open File</a>",
"_____no_output_____"
]
],
[
[
"cp.save_report(\"cm1asdasd/\")",
"_____no_output_____"
]
],
[
[
"#### Parameters ",
"_____no_output_____"
],
[
"1. `name` : output file name (type : `str`)\n2. `address` : flag for address return (type : `bool`, default : `True`)",
"_____no_output_____"
],
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : new in <span style=\"color:red;\">version 2.0</span> </li>\n</ul>",
"_____no_output_____"
],
[
"## Input errors",
"_____no_output_____"
]
],
[
[
"try:\n cm2=ConfusionMatrix(y_actu, 2)\nexcept pycmVectorError as e:\n print(str(e))",
"The type of input vectors is assumed to be a list or a NumPy array\n"
],
[
"try:\n cm3=ConfusionMatrix(y_actu, [1,2,3])\nexcept pycmVectorError as e:\n print(str(e))",
"Input vectors must have same length\n"
],
[
"try:\n cm_4 = ConfusionMatrix([], [])\nexcept pycmVectorError as e:\n print(str(e))",
"Input vectors are empty\n"
],
[
"try:\n cm_5 = ConfusionMatrix([1,1,1,], [1,1,1,1])\nexcept pycmVectorError as e:\n print(str(e))",
"Input vectors must have same length\n"
],
[
"try:\n cm3=ConfusionMatrix(matrix={})\nexcept pycmMatrixError as e:\n print(str(e))",
"Input confusion matrix format error\n"
],
[
"try:\n cm_4=ConfusionMatrix(matrix={1:{1:2,\"1\":2},\"1\":{1:2,\"1\":3}})\nexcept pycmMatrixError as e:\n print(str(e))",
"Type of the input matrix classes is assumed be the same\n"
],
[
"try:\n cm_5=ConfusionMatrix(matrix={1:{1:2}})\nexcept pycmVectorError as e:\n print(str(e))",
"Number of the classes is lower than 2\n"
],
[
"try:\n cp=Compare([cm2,cm3])\nexcept pycmCompareError as e:\n print(str(e))",
"The input type is considered to be dictionary but it's not!\n"
],
[
"try:\n cp=Compare({\"cm1\":cm,\"cm2\":cm2})\nexcept pycmCompareError as e:\n print(str(e))",
"The domain of all ConfusionMatrix objects must be same! The sample size or the number of classes are different.\n"
],
[
"try:\n cp=Compare({\"cm1\":[],\"cm2\":cm2})\nexcept pycmCompareError as e:\n print(str(e))",
"The input is considered to consist of pycm.ConfusionMatrix object but it's not!\n"
],
[
"try:\n cp=Compare({\"cm2\":cm2})\nexcept pycmCompareError as e:\n print(str(e))",
"Lower than two confusion matrices is given for comparing. The minimum number of confusion matrix for comparing is 2.\n"
],
[
"try:\n cp=Compare({\"cm1\":cm2,\"cm2\":cm3},by_class=True,weight={1:2,2:0})\nexcept pycmCompareError as e:\n print(str(e))",
"The weight type must be dictionary and also must be set for all classes.\n"
],
[
"try:\n cm.CI(\"MCC\")\nexcept pycmCIError as e:\n print(str(e))",
"CI calculation for this parameter is not supported on this version of pycm.\nSupported parameters : TPR,TNR,PPV,NPV,ACC,PLR,NLR,FPR,FNR,AUC,PRE,Kappa,Overall ACC\n"
],
[
"try:\n cm.CI(2)\nexcept pycmCIError as e:\n print(str(e))",
"The input type is considered to be string but it's not!\n"
]
],
[
[
"<ul>\n <li><span style=\"color:red;\">Notice </span> : updated in <span style=\"color:red;\">version 2.5</span> </li>\n</ul>",
"_____no_output_____"
],
[
"## Examples",
"_____no_output_____"
],
[
"\n### Example-1 (Comparison of three different classifiers)\t\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example1.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example1.html)\n\n### Example-2 (How to plot via matplotlib)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example2.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example2.html)\n\n### Example-3 (Activation threshold)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example3.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example3.html)\n\n### Example-4 (File)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example4.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example4.html)\n\n### Example-5 (Sample weights)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example5.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example5.html)\n\n### Example-6 (Unbalanced data)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example6.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example6.html)\n\n### Example-7 (How to plot via seaborn+pandas)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example7.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example7.html)\n\n### Example-8 (Confidence interval)\n\n- [Jupyter Notebook](https://nbviewer.jupyter.org/github/sepandhaghighi/pycm/blob/master/Document/Example8.ipynb)\n- [HTML](http://www.pycm.ir/doc/Example8.html)",
"_____no_output_____"
],
[
"## Cite",
"_____no_output_____"
],
[
"If you use PyCM in your research, we would appreciate citations to the following paper :",
"_____no_output_____"
],
[
"<pre>Haghighi, S., Jasemi, M., Hessabi, S. and Zolanvari, A. (2018). PyCM: Multiclass confusion matrix library in Python.<br/>Journal of Open Source Software, 3(25), p.729.</pre>",
"_____no_output_____"
],
[
"<pre>\n@article{Haghighi2018,\n doi = {10.21105/joss.00729},\n url = {https://doi.org/10.21105/joss.00729},\n year = {2018},\n month = {may},\n publisher = {The Open Journal},\n volume = {3},\n number = {25},\n pages = {729},\n author = {Sepand Haghighi and Masoomeh Jasemi and Shaahin Hessabi and Alireza Zolanvari},\n title = {{PyCM}: Multiclass confusion matrix library in Python},\n journal = {Journal of Open Source Software}\n}\n</pre>",
"_____no_output_____"
],
[
"Download <a href=\"http://www.pycm.ir/PYCM.bib\">PyCM.bib</a>",
"_____no_output_____"
],
[
"## References",
"_____no_output_____"
],
[
"<blockquote id=\"ref1\">1- J. R. Landis, G. G. Koch, “The measurement of observer agreement for categorical data. Biometrics,” in International Biometric Society, pp. 159–174, 1977. </blockquote>\n\n<blockquote id=\"ref2\">2- D. M. W. Powers, “Evaluation: from precision, recall and f-measure to roc, informedness, markedness & correlation,” in Journal of Machine Learning Technologies, pp.37-63, 2011.</blockquote>\n\n\n<blockquote id=\"ref3\">3- C. Sammut, G. Webb, “Encyclopedia of Machine Learning” in Springer, 2011.</blockquote>\n\n<blockquote id=\"ref4\">4- J. L. Fleiss, “Measuring nominal scale agreement among many raters,” in Psychological Bulletin, pp. 378-382, 1971. </blockquote>\n\n<blockquote id=\"ref5\">5- D.G. Altman, “Practical Statistics for Medical Research,” in Chapman and Hall, 1990.</blockquote>\n\n<blockquote id=\"ref6\">6- K. L. Gwet, “Computing inter-rater reliability and its variance in the presence of high agreement,” in The British Journal of Mathematical and Statistical Psychology, pp. 29–48, 2008.”</blockquote>\n\n<blockquote id=\"ref7\">7- W. A. Scott, “Reliability of content analysis: The case of nominal scaling,” in Public Opinion Quarterly, pp. 321–325, 1955.</blockquote>\n\n<blockquote id=\"ref8\">8- E. M. Bennett, R. Alpert, and A. C. Goldstein, “Communication through limited response questioning,” in The Public Opinion Quarterly, pp. 303–308, 1954.</blockquote>\n\n<blockquote id=\"ref9\">9- D. V. Cicchetti, \"Guidelines, criteria, and rules of thumb for evaluating normed and standardized assessment instruments in psychology,\" in Psychological Assessment, pp. 284–290, 1994.</blockquote>\n\n<blockquote id=\"ref10\">10- R.B. Davies, \"Algorithm AS155: The Distributions of a Linear Combination of χ2 Random Variables,\" in Journal of the Royal Statistical Society, pp. 323–333, 1980.</blockquote>\n\n<blockquote id=\"ref11\">11- S. Kullback, R. A. Leibler \"On information and sufficiency,\" in Annals of Mathematical Statistics, pp. 79–86, 1951.</blockquote>\n\n<blockquote id=\"ref12\">12- L. A. Goodman, W. H. Kruskal, \"Measures of Association for Cross Classifications, IV: Simplification of Asymptotic Variances,\" in Journal of the American Statistical Association, pp. 415–421, 1972.</blockquote>\n\n<blockquote id=\"ref13\">13- L. A. Goodman, W. H. Kruskal, \"Measures of Association for Cross Classifications III: Approximate Sampling Theory,\" in Journal of the American Statistical Association, pp. 310–364, 1963. </blockquote>\n\n<blockquote id=\"ref14\">14- T. Byrt, J. Bishop and J. B. Carlin, “Bias, prevalence, and kappa,” in Journal of Clinical Epidemiology pp. 423-429, 1993.</blockquote>\n\n<blockquote id=\"ref15\">15- M. Shepperd, D. Bowes, and T. Hall, “Researcher Bias: The Use of Machine Learning in Software Defect Prediction,” in IEEE Transactions on Software Engineering, pp. 603-616, 2014.</blockquote>\n\n<blockquote id=\"ref16\">16- X. Deng, Q. Liu, Y. Deng, and S. Mahadevan, “An improved method to construct basic probability assignment based on the confusion matrix for classification problem, ” in Information Sciences, pp.250-261, 2016.</blockquote>\n\n<blockquote id=\"ref17\">17- J.-M. Wei, X.-J. Yuan, Q.-H. Hu, and S.-Q. J. E. S. w. A. Wang, \"A novel measure for evaluating classifiers,\" in Expert Systems with Applications, pp. 3799-3809, 2010.</blockquote>\n\n<blockquote id=\"ref18\">18- I. Kononenko and I. J. M. L. Bratko, \"Information-based evaluation criterion for classifier's performance,\" in Machine Learning, pp. 
67-80, 1991.</blockquote>\n\n<blockquote id=\"ref19\">19- R. Delgado and J. D. Núñez-González, \"Enhancing Confusion Entropy as Measure for Evaluating Classifiers,\" in The 13th International Conference on Soft Computing Models in Industrial and Environmental Applications, pp. 79-89, 2018: Springer.</blockquote>\n\n<blockquote id=\"ref20\">20- J. J. C. b. Gorodkin and chemistry, \"Comparing two K-category assignments by a K-category correlation coefficient,\" in Computational Biology and chemistry, pp. 367-374, 2004.</blockquote>\n\n<blockquote id=\"ref21\">21- C. O. Freitas, J. M. De Carvalho, J. Oliveira, S. B. Aires, and R. Sabourin, \"Confusion matrix disagreement for multiple classifiers,\" in Iberoamerican Congress on Pattern Recognition, pp. 387-396, 2007.</blockquote>\n\n<blockquote id=\"ref22\">22- P. Branco, L. Torgo, and R. P. Ribeiro, \"Relevance-based evaluation metrics for multi-class imbalanced domains,\" in Pacific-Asia Conference on Knowledge Discovery and Data Mining, pp. 698-710, 2017. Springer.</blockquote>\n\n<blockquote id=\"ref23\">23- D. Ballabio, F. Grisoni, R. J. C. Todeschini, and I. L. Systems, \"Multivariate comparison of classification performance measures,\" in Chemometrics and Intelligent Laboratory Systems, pp. 33-44, 2018.</blockquote>\n\n<blockquote id=\"ref24\">24- J. J. E. Cohen and p. measurement, \"A coefficient of agreement for nominal scales,\" in Educational and Psychological Measurement, pp. 37-46, 1960.</blockquote>\n\n<blockquote id=\"ref25\">25- S. Siegel, \"Nonparametric statistics for the behavioral sciences,\" in \tNew York : McGraw-Hill, 1956.</blockquote>\n\n<blockquote id=\"ref26\">26- H. Cramér, \"Mathematical methods of statistics (PMS-9),\"in Princeton university press, 2016.</blockquote>\n\n<blockquote id=\"ref27\">27- B. W. J. B. e. B. A.-P. S. Matthews, \"Comparison of the predicted and observed secondary structure of T4 phage lysozyme,\" in Biochimica et Biophysica Acta (BBA) - Protein Structure, pp. 442-451, 1975.</blockquote>\n\n<blockquote id=\"ref28\">28- J. A. J. S. Swets, \"The relative operating characteristic in psychology: a technique for isolating effects of response bias finds wide use in the study of perception and cognition,\" in American Association for the Advancement of Science, pp. 990-1000, 1973.</blockquote> \n\n<blockquote id=\"ref29\">29- P. J. B. S. V. S. N. Jaccard, \"Étude comparative de la distribution florale dans une portion des Alpes et des Jura,\" in Bulletin de la Société vaudoise des sciences naturelles, pp. 547-579, 1901.</blockquote> \n\n<blockquote id=\"ref30\">30- T. M. Cover and J. A. Thomas, \"Elements of information theory,\" in John Wiley & Sons, 2012.</blockquote> \n\n<blockquote id=\"ref31\">31- E. S. Keeping, \"Introduction to statistical inference,\" in Courier Corporation, 1995.</blockquote>\n\n<blockquote id=\"ref32\">32- V. Sindhwani, P. Bhattacharya, and S. Rakshit, \"Information theoretic feature crediting in multiclass support vector machines,\" in Proceedings of the 2001 SIAM International Conference on Data Mining, pp. 1-18, 2001.</blockquote> \n\n<blockquote id=\"ref33\">33- M. Bekkar, H. K. Djemaa, and T. A. J. J. I. E. A. Alitouche, \"Evaluation measures for models assessment over imbalanced data sets,\" in Journal of Information Engineering and Applications, 2013.</blockquote>\n\n<blockquote id=\"ref34\">34- W. J. J. C. Youden, \"Index for rating diagnostic tests,\" in Cancer, pp. 32-35, 1950.</blockquote>\n\n<blockquote id=\"ref35\">35- S. Brin, R. Motwani, J. D. 
Ullman, and S. J. A. S. R. Tsur, \"Dynamic itemset counting and implication rules for market basket data,\" in Proceedings of the 1997 ACM SIGMOD international conference on Management of datavol, pp. 255-264, 1997.</blockquote> \n\n<blockquote id=\"ref36\">36- S. J. T. J. o. O. S. S. Raschka, \"MLxtend: Providing machine learning and data science utilities and extensions to Python’s scientific computing stack,\" in Journal of Open Source Software, 2018.</blockquote> \n\n<blockquote id=\"ref37\">37- J. BRAy and J. CuRTIS, \"An ordination of upland forest communities of southern Wisconsin.-ecological Monographs,\" in journal of Ecological Monographs, 1957.</blockquote>\t\n\n<blockquote id=\"ref38\">38- J. L. Fleiss, J. Cohen, and B. S. J. P. B. Everitt, \"Large sample standard errors of kappa and weighted kappa,\" in Psychological Bulletin, p. 323, 1969.</blockquote> \t\n\n<blockquote id=\"ref39\">39- M. Felkin, \"Comparing classification results between n-ary and binary problems,\" in Quality Measures in Data Mining: Springer, pp. 277-301, 2007.</blockquote> \n\n<blockquote id=\"ref40\">40- R. Ranawana and V. Palade, \"Optimized Precision-A new measure for classifier performance evaluation,\" in 2006 IEEE International Conference on Evolutionary Computation, pp. 2254-2261, 2006.</blockquote>\t\n\n<blockquote id=\"ref41\">41- V. García, R. A. Mollineda, and J. S. Sánchez, \"Index of balanced accuracy: A performance measure for skewed class distributions,\" in Iberian Conference on Pattern Recognition and Image Analysis, pp. 441-448, 2009.</blockquote> \n\n<blockquote id=\"ref42\">42- P. Branco, L. Torgo, and R. P. J. A. C. S. Ribeiro, \"A survey of predictive modeling on imbalanced domains,\" in Journal ACM Computing Surveys (CSUR), p. 31, 2016.</blockquote> \n\n<blockquote id=\"ref43\">43- K. Pearson, \"Notes on Regression and Inheritance in the Case of Two Parents,\" in Proceedings of the Royal Society of London, p. 240-242, 1895.</blockquote> \n\n<blockquote id=\"ref44\">44- W. J. I. Conover, New York, \"Practical Nonparametric Statistics,\" in John Wiley and Sons, 1999.</blockquote> \n\n<blockquote id=\"ref45\">45- Yule, G. U, \"On the methods of measuring association between two attributes.\" in Journal of the Royal Statistical Society, pp. 579-652, 1912.</blockquote>\n\n<blockquote id=\"ref46\">46- Batuwita, R. and Palade, V, \"A new performance measure for class imbalance learning. application to bioinformatics problems,\" in Machine Learning and Applications, pp.545–550, 2009.</blockquote>\n\n<blockquote id=\"ref47\">47- D. K. Lee, \"Alternatives to P value: confidence interval and effect size,\" Korean journal of anesthesiology, vol. 69, no. 6, p. 555, 2016.</blockquote>\n\n<blockquote id=\"ref48\">48- M. A. Raslich, R. J. Markert, and S. A. Stutes, \"Selecting and interpreting diagnostic tests,\" Biochemia medica: Biochemia medica, vol. 17, no. 2, pp. 151-161, 2007.</blockquote>\n\n<blockquote id=\"ref49\">49- D. E. Hinkle, W. Wiersma, and S. G. Jurs, \"Applied statistics for the behavioral sciences,\" 1988.</blockquote>\n\n<blockquote id=\"ref50\">50- A. Maratea, A. Petrosino, and M. Manzo, \"Adjusted F-measure and kernel scaling for imbalanced data learning,\" Information Sciences, vol. 257, pp. 331-341, 2014.</blockquote>\n\n<blockquote id=\"ref51\">51- L. Mosley, \"A balanced approach to the multi-class imbalance problem,\" 2013.</blockquote>\n\n<blockquote id=\"ref52\">52- M. Vijaymeena and K. 
Kavitha, \"A survey on similarity measures in text mining,\" Machine Learning and Applications: An International Journal, vol. 3, no. 2, pp. 19-28, 2016.</blockquote>\n\n<blockquote id=\"ref53\">53- Y. Otsuka, \"The faunal character of the Japanese Pleistocene marine Mollusca, as evidence of climate having become colder during the Pleistocene in Japan,\" Biogeograph. Soc. Japan, vol. 6, pp. 165-170, 1936.</blockquote>\n\n<blockquote id=\"ref54\">54- A. Tversky, \"Features of similarity,\" Psychological review, vol. 84, no. 4, p. 327, 1977.</blockquote>\n\n<blockquote id=\"ref55\">55- K. Boyd, K. H. Eng, and C. D. Page, \"Area under the precision-recall curve: point estimates and confidence intervals,\" in Joint European conference on machine learning and knowledge discovery in databases, 2013, pp. 451-466: Springer.</blockquote>\n\n<blockquote id=\"ref56\">56- J. Davis and M. Goadrich, \"The relationship between Precision-Recall and ROC curves,\" in Proceedings of the 23rd international conference on Machine learning, 2006, pp. 233-240: ACM.</blockquote>\n\n<blockquote id=\"ref57\">57- M. Kuhn, \"Building predictive models in R using the caret package,\" Journal of statistical software, vol. 28, no. 5, pp. 1-26, 2008.</blockquote>\n\n<blockquote id=\"ref58\">58- V. Labatut and H. Cherifi, \"Accuracy measures for the comparison of classifiers,\" arXiv preprint, 2012.</blockquote>\n\n<blockquote id=\"ref59\">59- S. Wallis, \"Binomial confidence intervals and contingency tests: mathematical fundamentals and the evaluation of alternative methods,\" Journal of Quantitative Linguistics, vol. 20, no. 3, pp. 178-208, 2013.</blockquote>\n\n<blockquote id=\"ref60\">60- D. Altman, D. Machin, T. Bryant, and M. Gardner, Statistics with confidence: confidence intervals and statistical guidelines. John Wiley & Sons, 2013.</blockquote>\n\n<blockquote id=\"ref61\">61- J. A. Hanley and B. J. McNeil, \"The meaning and use of the area under a receiver operating characteristic (ROC) curve,\" Radiology, vol. 143, no. 1, pp. 29-36, 1982.</blockquote>\n\n<blockquote id=\"ref62\">62- E. B. Wilson, \"Probable inference, the law of succession, and statistical inference,\" Journal of the American Statistical Association, vol. 22, no. 158, pp. 209-212, 1927.</blockquote>\n\n<blockquote id=\"ref63\">63- A. Agresti and B. A. Coull, \"Approximate is better than “exact” for interval estimation of binomial proportions,\" The American Statistician, vol. 52, no. 2, pp. 119-126, 1998.</blockquote>\n\n<blockquote id=\"ref64\">64- C. S. Peirce, \"The numerical measure of the success of predictions,\" Science, no. 93, pp. 453-454, 1884.</blockquote>\n\n<blockquote id=\"ref65\">65- E. W. Steyerberg, B. Van Calster, and M. J. Pencina, \"Performance measures for prediction models and markers: evaluation of predictions and classifications,\" Revista Española de Cardiología, vol. 64, no. 9, pp. 788-794, 2011.</blockquote>\n\n<blockquote id=\"ref66\">66- A. J. Vickers and E. B. Elkin, \"Decision curve analysis: a novel method for evaluating prediction models,\" Medical Decision Making, vol. 26, no. 6, pp. 565-574, 2006.</blockquote>\n\n<blockquote id=\"ref67\">67- D. Knoke, G. W. Bohrnstedt, and A. P. Mee, Statistics for social data analysis. FE Peacock Publishers Itasca, IL, 2002</blockquote>\n\n<blockquote id=\"ref68\">68- W. M. Rand, \"Objective criteria for the evaluation of clustering methods,\" Journal of the American Statistical association, vol. 66, no. 336, pp. 846-850, 1971.</blockquote>\n\n<blockquote id=\"ref69\">69- J. M. 
Santos and M. Embrechts, \"On the use of the adjusted rand index as a metric for evaluating supervised classification,\" in International conference on artificial neural networks, 2009: Springer, pp. 175-184.</blockquote>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e752f286e305f197cbb792edbb13a8a57461e630 | 13,362 | ipynb | Jupyter Notebook | 02-intermediate/language_model/main.ipynb | foamliu/PyTorch-Tutorial | 4cc629a6fbc9225018f256d2b11f5de3bb9af90d | [
"Apache-2.0"
] | null | null | null | 02-intermediate/language_model/main.ipynb | foamliu/PyTorch-Tutorial | 4cc629a6fbc9225018f256d2b11f5de3bb9af90d | [
"Apache-2.0"
] | null | null | null | 02-intermediate/language_model/main.ipynb | foamliu/PyTorch-Tutorial | 4cc629a6fbc9225018f256d2b11f5de3bb9af90d | [
"Apache-2.0"
] | null | null | null | 41.887147 | 232 | 0.552313 | [
[
[
"# Some part of the code was referenced from below.\n# https://github.com/pytorch/examples/tree/master/word_language_model \nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.nn.utils import clip_grad_norm\nfrom data_utils import Dictionary, Corpus\n",
"_____no_output_____"
],
[
"# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters\nembed_size = 128\nhidden_size = 1024\nnum_layers = 1\nnum_epochs = 5\nnum_samples = 1000 # number of words to be sampled\nbatch_size = 20\nseq_length = 30\nlearning_rate = 0.002",
"_____no_output_____"
],
[
"# Load \"Penn Treebank\" dataset\ncorpus = Corpus()\nids = corpus.get_data('data/train.txt', batch_size)\nvocab_size = len(corpus.dictionary)\nnum_batches = ids.size(1) // seq_length",
"_____no_output_____"
],
[
"# RNN based language model\nclass RNNLM(nn.Module):\n def __init__(self, vocab_size, embed_size, hidden_size, num_layers):\n super(RNNLM, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n \n def forward(self, x, h):\n # Embed word ids to vectors\n x = self.embed(x)\n \n # Forward propagate LSTM\n out, (h, c) = self.lstm(x, h)\n \n # Reshape output to (batch_size*sequence_length, hidden_size)\n out = out.reshape(out.size(0)*out.size(1), out.size(2))\n \n # Decode hidden states of all time steps\n out = self.linear(out)\n return out, (h, c)\n\nmodel = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device)",
"_____no_output_____"
],
[
"# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Truncated backpropagation\ndef detach(states):\n return [state.detach() for state in states] \n",
"_____no_output_____"
],
[
"# Train the model\nfor epoch in range(num_epochs):\n # Set initial hidden and cell states\n states = (torch.zeros(num_layers, batch_size, hidden_size).to(device),\n torch.zeros(num_layers, batch_size, hidden_size).to(device))\n \n for i in range(0, ids.size(1) - seq_length, seq_length):\n # Get mini-batch inputs and targets\n inputs = ids[:, i:i+seq_length].to(device)\n targets = ids[:, (i+1):(i+1)+seq_length].to(device)\n \n # Forward pass\n states = detach(states)\n outputs, states = model(inputs, states)\n loss = criterion(outputs, targets.reshape(-1))\n \n # Backward and optimize\n model.zero_grad()\n loss.backward()\n clip_grad_norm(model.parameters(), 0.5)\n optimizer.step()\n\n step = (i+1) // seq_length\n if step % 100 == 0:\n print ('Epoch [{}/{}], Step[{}/{}], Loss: {:.4f}, Perplexity: {:5.2f}'\n .format(epoch+1, num_epochs, step, num_batches, loss.item(), np.exp(loss.item())))",
"c:\\users\\foamliu.fareast\\appdata\\local\\programs\\python\\python35\\lib\\site-packages\\ipykernel_launcher.py:20: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.\n"
],
[
"# Test the model\nwith torch.no_grad():\n with open('sample.txt', 'w') as f:\n # Set intial hidden ane cell states\n state = (torch.zeros(num_layers, 1, hidden_size).to(device),\n torch.zeros(num_layers, 1, hidden_size).to(device))\n\n # Select one word id randomly\n prob = torch.ones(vocab_size)\n input = torch.multinomial(prob, num_samples=1).unsqueeze(1).to(device)\n\n for i in range(num_samples):\n # Forward propagate RNN \n output, state = model(input, state)\n\n # Sample a word id\n prob = output.exp()\n word_id = torch.multinomial(prob, num_samples=1).item()\n\n # Fill input with sampled word id for the next time step\n input.fill_(word_id)\n\n # File write\n word = corpus.dictionary.idx2word[word_id]\n word = '\\n' if word == '<eos>' else word + ' '\n f.write(word)\n\n if (i+1) % 100 == 0:\n print('Sampled [{}/{}] words and save to {}'.format(i+1, num_samples, 'sample.txt'))",
"Sampled [100/1000] words and save to sample.txt\nSampled [200/1000] words and save to sample.txt\nSampled [300/1000] words and save to sample.txt\nSampled [400/1000] words and save to sample.txt\nSampled [500/1000] words and save to sample.txt\nSampled [600/1000] words and save to sample.txt\nSampled [700/1000] words and save to sample.txt\nSampled [800/1000] words and save to sample.txt\nSampled [900/1000] words and save to sample.txt\nSampled [1000/1000] words and save to sample.txt\n"
],
[
"# Save the model checkpoints\ntorch.save(model.state_dict(), 'model.ckpt')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e752fcc41a26f30aca31123510f091499913c2c2 | 40,136 | ipynb | Jupyter Notebook | scripts/scripts_ipynb/0422_figs.ipynb | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z | scripts/scripts_ipynb/0422_figs.ipynb | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 6 | 2020-02-17T13:44:43.000Z | 2020-06-25T15:35:05.000Z | scripts/scripts_ipynb/0422_figs.ipynb | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z | 22.94797 | 811 | 0.435295 | [
[
[
"from analysis.evol_lambda import *\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nmpgs = pickle.load(open('./main_prgs_GM.pickle', 'rb'))",
"_____no_output_____"
],
[
"zreds=[]\naexps=[]\nbase='./05427/'\nwdir = './'  # working directory; must be defined before the Info() calls below\nimport load\nnout_ini = 1000\nfor mpg in mpgs:\n    nout_ini = min((nout_ini, min(mpg.nouts)))\n\nfor nout in mpgs[0].nouts:\n    info = load.info.Info(nout=nout, base=wdir, load=True)\n    aexps.append(info.aexp)\n    zreds.append(info.zred)\naexps = np.array(aexps)\nzreds = np.array(zreds)\n\nnout_fi = 187\nnnouts = nout_fi - nout_ini + 1",
"_____no_output_____"
],
[
"def aexp2zred(aexp):\n return [1.0/a - 1.0 for a in aexp]\n\ndef zred2aexp(zred):\n return [1.0/(1.0 + z) for z in zred]\n\ndef lbt2aexp(lts):\n import astropy.units as u\n from astropy.cosmology import WMAP7, z_at_value\n zreds = [z_at_value(WMAP7.lookback_time, ll * u.Gyr) for ll in lts]\n return [1.0/(1+z) for z in zreds]\n\n# For a given list of nouts, \n# calculate a nice-looking set of zreds.\n# AND lookback times\nz_targets=[0, 0.2, 0.5, 1, 2, 3]\nz_target_str=[\"{:.2f}\".format(z) for z in z_targets]\na_targets_z = zred2aexp(z_targets)\nz_pos = [nout_ini + (1 - (max(aexps) - a)/aexps.ptp()) * nnouts for a in a_targets_z]\n\nlbt_targets=[0.00001,1,3,5,8,12]\nlbt_target_str=[\"{:.0f}\".format(l) for l in lbt_targets]\na_targets_lbt = lbt2aexp(lbt_targets)\nlbt_pos = [nout_ini + (1 - (max(aexps) - a)/aexps.ptp()) * nnouts for a in a_targets_lbt]\n#from astropy.cosmology import WMAP7 as cosmo\n#lookback_t=[cosmo.lookback_time(i).value for i in zreds]",
"_____no_output_____"
]
],
[
[
"## delta M Vs delta Lambda",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()",
"_____no_output_____"
],
[
"dts = [1,3,5,10]\nalldm =[[]]*len(dts)\nalldl =[[]]*len(dts)\nallmavg=[[]]*len(dts)\nalllavg=[[]]*len(dts)\nfor i, dt in enumerate(dts):\n dm =[]\n dl =[]\n mavg =[]\n lavg =[]\n \n for gal in mpgs:\n ind = gal.data['lambda_r'] > 0\n mstar = gal.data['mstar'][ind]\n sbam = gal.data['lambda_r'][ind]\n #dm.extend((mstar[:-dt] - mstar[dt:])/mstar[dt:])\n #dl.extend(sbam[:-dt] - sbam[dt:])\n mavg = [sum(mstar[i*dt:(i+1)*dt])/dt for i in range(int(len(mstar)/dt))]\n lavg = [sum(sbam[i*dt:(i+1)*dt])/dt for i in range(int(len(sbam)/dt))]\n mavg = np.array(mavg)\n lavg = np.array(lavg)\n dm.extend((mavg[:-1] - mavg[1:])/mavg[1:])\n dl.extend(lavg[:-1] - lavg[1:])\n alldm[i] = dm\n alldl[i] = dl\n allmavg[i] = mavg\n alllavg[i] = lavg\n\n \n #dm = np.array((mavg[:1]-mavg[1:])/mavg[1:])\n #dl = np.array(lavg[:1]-lavg[1:])",
"_____no_output_____"
],
[
"do_contour=True\ndo_3d = False\n\nfig, ax = plt.subplots(2,2)\nax = ax.ravel()\n\n\nfor i, dt in enumerate(dts):\n dm = np.array(alldm[i])\n dl = np.array(alldl[i])\n \n if do_contour:\n import scipy.stats as st\n xmin, xmax = -0.1, 0.1\n ymin, ymax = -0.1, 0.1\n\n xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]\n positions = np.vstack([xx.ravel(), yy.ravel()])\n values = np.vstack([dm, dl])\n kernel = st.gaussian_kde(values)\n f = np.reshape(kernel(positions).T, xx.shape)\n f /= max(f.ravel())\n levels=[0.002, 0.02, 0.05, 0.2, 0.4, 0.95]\n if do_3d:\n from mpl_toolkits.mplot3d import Axes3D\n ax[i] = fig.gca(projection='3d')\n surf = ax[i].plot_surface(xx, yy, f, rstride=1, cstride=1, cmap='jet',\n linewidth=0, antialiased=False)\n else:\n cfset = ax[i].contour(xx, yy, f, levels=levels, cmap='winter', lw=5)\n else:\n ax[i].scatter(dm,dl, edgecolors='none', alpha=0.1)\n ax[i].set_xlim([xmin,xmax])\n ax[i].set_ylim([ymin,ymax])\n ax[i].scatter([0],[0], s=[100], linewidth='3', marker='*', c='red')\n ax[i].set_title(\"dt = {}\".format(dt))\n #ax[i].hist2d(dm,dl,bins = 20, range=([-0.2,0.4],[-0.2,0.2]), cmap='PuBu')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"dl.mean()",
"_____no_output_____"
]
],
[
[
"### Mstar, Lambda all",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(2)\n# * (gal.data['mstar'] > 1e11)\nfor gal in mpgs:\n ind = np.where(gal.data['lambda_r'] > 0)[0]\n mstar_ini = np.average(gal.data['mstar'][ind[-5:]])\n l_ini = np.average(gal.data['lambda_r'][ind[-5:]])\n ax[0].plot(gal.nouts[ind], np.log10(gal.data['mstar'][ind]/mstar_ini), c=\"grey\", alpha=0.05)\n ax[0].set_ylim([-1,1])\n ax[1].plot(gal.nouts[ind], gal.data['lambda_r'][ind] - l_ini, c=\"grey\", alpha=0.05)\n ax[1].set_ylim([-0.5,0.5])\nplt.show()",
"_____no_output_____"
],
[
"for gal in mpgs:\n print(gal.data['idx'][0])",
"241740\n241535\n241448\n241363\n241338\n241306\n241258\n241228\n241202\n241176\n241140\n241565\n241482\n241403\n241347\n241312\n241269\n241240\n241216\n241183\n241157\n241581\n241498\n241411\n241351\n241318\n241294\n241243\n241219\n241191\n241162\n241685\n241534\n241446\n241361\n241337\n241303\n241255\n241227\n241200\n241175\n241646\n241506\n241418\n241352\n241327\n241296\n241248\n241221\n241193\n241165\n241564\n241480\n241400\n241346\n241311\n241266\n241238\n241214\n241181\n241154\n241649\n241520\n241426\n241358\n241328\n241300\n241252\n241224\n241196\n241169\n241665\n241530\n241437\n241359\n241330\n241302\n241253\n241225\n241197\n241173\n241566\n241489\n241410\n241350\n241314\n241275\n241242\n241218\n241189\n241159\n241545\n241465\n241369\n241342\n241309\n241263\n241235\n241210\n241179\n241151\n241563\n241467\n241376\n241344\n241310\n241265\n241237\n241213\n241180\n241153\n241540\n241453\n241368\n241339\n241308\n241260\n241233\n241206\n241178\n241146\n183678\n183555\n183482\n183432\n183403\n183359\n183580\n183503\n183442\n183417\n183386\n183335\n183607\n183526\n183450\n183424\n183393\n183341\n183588\n183525\n183448\n183423\n183390\n183337\n183621\n183528\n183474\n183425\n183394\n183342\n183582\n183517\n183444\n183418\n183387\n183336\n183651\n183552\n183478\n183431\n183399\n183355\n183624\n183545\n183475\n183429\n183395\n183352\n183696\n183560\n183483\n183435\n183409\n183365\n183565\n183492\n183440\n183413\n183376\n183563\n183487\n183437\n183411\n183373\n183566\n183502\n183441\n183416\n183379\n223849\n223794\n223723\n223698\n223680\n223656\n223644\n223620\n223608\n223586\n223578\n223563\n223557\n223549\n223538\n223521\n223500\n223492\n223485\n223470\n223462\n223451\n223434\n223419\n224041\n223798\n223732\n223702\n223689\n223657\n223646\n223626\n223609\n223590\n223580\n223564\n223558\n223553\n223539\n223524\n223510\n223496\n223487\n223475\n223465\n223452\n223438\n223424\n224043\n223817\n223744\n223710\n223693\n223676\n223649\n223632\n223610\n223595\n223584\n223571\n223559\n223554\n223543\n223529\n223513\n223498\n223488\n223479\n223466\n223456\n223439\n223425\n223820\n223767\n223713\n223694\n223679\n223653\n223635\n223615\n223606\n223585\n223573\n223560\n223555\n223545\n223537\n223519\n223499\n223489\n223481\n223467\n223459\n223442\n223431\n40483\n40470\n40490\n40473\n40485\n40472\n40503\n40481\n40494\n40478\n40492\n40476\n40495\n40480\n40482\n88861\n88816\n88769\n88856\n88812\n88767\n88984\n88851\n88792\n88881\n88818\n88781\n88906\n88836\n88786\n88905\n88820\n88783\n88991\n88852\n88801\n88855\n88810\n49757\n49738\n49764\n49746\n49761\n49740\n49753\n49733\n49766\n49750\n49763\n49742\n985771\n985489\n985067\n984883\n984732\n984599\n984508\n984420\n984344\n984283\n984211\n984145\n984078\n984025\n983970\n983907\n983855\n983799\n983724\n983664\n985636\n985093\n984900\n984752\n984616\n984520\n984426\n984358\n984292\n984231\n984154\n984090\n984035\n983976\n983916\n983869\n983803\n983736\n983670\n985757\n985440\n985063\n984872\n984730\n984580\n984502\n984417\n984335\n984281\n984208\n984141\n984076\n984023\n983968\n983902\n983847\n983793\n983720\n983660\n985678\n985160\n984946\n984794\n984674\n984543\n984464\n984384\n984309\n984259\n984179\n984118\n984054\n983998\n983936\n983883\n983821\n983766\n983696\n985681\n985190\n984951\n984837\n984683\n984550\n984475\n984388\n984313\n984261\n984186\n984124\n984062\n984006\n983950\n983887\n983828\n983771\n983700\n985660\n985140\n984922\n984785\n984648\n984532\n984447\n984375\n984302\n984249\n984176\n984112\n984052\n983992\n983934\n983881\n983817\n983758\n9836
87\n985786\n985630\n985086\n984893\n984743\n984614\n984515\n984422\n984354\n984286\n984220\n984150\n984085\n984032\n983972\n983911\n983858\n983801\n983726\n983666\n985656\n985126\n984916\n984780\n984640\n984526\n984437\n984366\n984298\n984245\n984171\n984109\n984046\n983986\n983921\n983876\n983810\n983750\n983681\n985710\n985276\n984996\n984855\n984699\n984554\n984483\n984401\n984319\n984265\n984191\n984133\n984066\n984012\n983956\n983892\n983838\n983778\n983710\n985644\n985097\n984903\n984765\n984622\n984522\n984428\n984361\n984294\n984236\n984161\n984097\n984038\n983979\n983918\n983872\n983804\n983737\n983674\n985716\n985289\n985030\n984864\n984700\n984562\n984492\n984407\n984321\n984268\n984196\n984134\n984069\n984018\n983957\n983893\n983840\n983779\n983713\n985734\n985384\n985040\n984867\n984717\n984574\n984498\n984409\n984329\n984273\n984204\n984136\n984074\n984021\n983963\n983898\n983843\n983784\n983718\n983645\n985650\n985114\n984914\n984772\n984627\n984525\n984433\n984365\n984297\n984241\n984168\n984100\n984041\n983984\n983920\n983875\n983809\n983749\n983677\n985648\n985105\n984908\n984769\n984625\n984524\n984432\n984363\n984296\n984238\n984167\n984098\n984039\n983981\n983919\n983873\n983806\n983743\n983676\n985767\n985447\n985064\n984879\n984731\n984588\n984504\n984418\n984341\n984282\n984210\n984144\n984077\n984024\n983969\n983905\n983852\n983798\n983723\n983662\n985718\n985339\n985033\n984865\n984706\n984569\n984497\n984408\n984322\n984269\n984198\n984135\n984071\n984019\n983958\n983896\n983842\n983783\n983717\n985659\n985133\n984917\n984783\n984643\n984531\n984439\n984367\n984299\n984246\n984173\n984111\n984051\n983990\n983928\n983878\n983813\n983756\n983683\n985635\n985090\n984896\n984751\n984615\n984516\n984424\n984357\n984289\n984224\n984153\n984089\n984034\n983975\n983915\n983861\n983802\n983728\n983669\n985674\n985153\n984927\n984786\n984667\n984541\n984460\n984377\n984308\n984250\n984178\n984117\n984053\n983996\n983935\n983882\n983819\n983764\n983689\n985780\n985560\n985082\n984892\n984737\n984601\n984511\n984421\n984349\n984284\n984217\n984146\n984084\n984031\n983971\n983909\n983856\n983800\n983725\n983665\n985741\n985439\n985056\n984871\n984718\n984578\n984499\n984412\n984334\n984274\n984205\n984139\n984075\n984022\n983965\n983900\n983844\n983787\n983719\n983659\n985697\n985266\n984969\n984840\n984696\n984553\n984482\n984397\n984318\n984264\n984190\n984129\n984065\n984008\n983954\n983891\n983836\n983777\n983709\n985679\n985174\n984949\n984831\n984679\n984549\n984471\n984387\n984312\n984260\n984182\n984122\n984061\n984004\n983938\n983886\n983826\n983770\n983698\n985687\n985244\n984959\n984839\n984690\n984552\n984481\n984389\n984315\n984263\n984188\n984126\n984064\n984007\n983953\n983890\n983831\n983776\n983702\n167109\n167081\n167051\n167036\n167025\n167012\n167003\n166986\n166976\n166964\n166954\n166936\n166925\n166908\n166894\n166884\n167099\n167075\n167049\n167032\n167024\n167011\n167002\n166984\n166973\n166962\n166952\n166934\n166922\n166906\n166890\n166879\n167083\n167054\n167037\n167026\n167017\n167004\n166990\n166978\n166965\n166959\n166941\n166926\n166915\n166897\n166886\n167097\n167056\n167047\n167031\n167021\n167005\n166999\n166979\n166968\n166960\n166946\n166932\n166918\n166904\n166888\n300306\n300124\n300067\n300020\n299980\n299952\n299924\n299908\n299882\n299852\n299832\n299816\n299804\n299772\n299754\n299736\n299720\n299701\n299688\n299668\n299651\n299635\n299622\n299605\n300356\n300154\n300096\n300045\n299997\n299975\n299941\n299917\n299893\n299867\n299837\
n299825\n299811\n299788\n299763\n299748\n299729\n299708\n299693\n299673\n299657\n299643\n299628\n299611\n300331\n300144\n300087\n300027\n299986\n299958\n299938\n299912\n299888\n299861\n299834\n299818\n299807\n299775\n299758\n299744\n299723\n299703\n299690\n299670\n299655\n299637\n299626\n299607\n300361\n300177\n300109\n300050\n300000\n299976\n299945\n299920\n299897\n299868\n299838\n299826\n299813\n299791\n299766\n299749\n299731\n299710\n299695\n299674\n299662\n299645\n299632\n299613\n300315\n300142\n300084\n300021\n299984\n299956\n299937\n299910\n299887\n299854\n299833\n299817\n299805\n299773\n299756\n299742\n299721\n299702\n299689\n299669\n299654\n299636\n299625\n299606\n300368\n300201\n300113\n300053\n300006\n299977\n299948\n299922\n299904\n299869\n299840\n299828\n299814\n299799\n299769\n299751\n299733\n299714\n299697\n299682\n299663\n299647\n299633\n299615\n300335\n300153\n300094\n300035\n299992\n299969\n299940\n299914\n299892\n299865\n299836\n299821\n299809\n299780\n299760\n299746\n299728\n299705\n299692\n299671\n299656\n299642\n299627\n299609\n300224\n300123\n300061\n300018\n299978\n299951\n299923\n299907\n299872\n299844\n299830\n299815\n299801\n299771\n299753\n299735\n299717\n299698\n299684\n299667\n299650\n299634\n299619\n123037\n122767\n122745\n122706\n122687\n122665\n123039\n122772\n122747\n122709\n122690\n122668\n123054\n122780\n122752\n122715\n122694\n122673\n123051\n122776\n122751\n122714\n122693\n122672\n123047\n122773\n122750\n122710\n122692\n122670\n122835\n122758\n122728\n122698\n122679\n122784\n122753\n122717\n122695\n122674\n123025\n122766\n122739\n122705\n122683\n122899\n122759\n122729\n122701\n122680\n122787\n122754\n122724\n122696\n122675\n122824\n122755\n122727\n122697\n122676\n123024\n122761\n122737\n122704\n122682\n"
]
],
[
[
"### Merger epoch with Mstar vs lambda",
"_____no_output_____"
]
],
[
[
"import tree.ctutils as ctu\nfrom tree import treeutils\nimport numpy as np\nimport pickle\n\n# Calculate merger event parameters\ndef find_merger(atree, idx=None, aexp_min=0.0):\n \"\"\"\n find indices of merger event from a tree.\n (Full tree or main progenitor trunk)\n \"\"\"\n if idx == None:\n idx = atree['id'][0]\n\n nprg = 1\n merger_list=[]\n\n i = 0\n while nprg > 0:\n idx = ctu.get_progenitors(atree, idx, main=True)[0]\n ind = np.where(atree['id'] == idx)[0]\n if atree['aexp'][ind] < aexp_min:\n break\n nprg = ctu.get_npr(atree, idx)\n\n if nprg > 1:\n merger_list.append(i)\n i +=1\n return merger_list\n\n\n\ndef merger_mass_ratio(atree, idx=None):\n \"\"\"\n return mass ratio of the given merger event\n \"\"\"\n if idx == None:\n idx = atree['id'][0]\n\n prgs = ctu.get_progenitors(atree, idx)\n\n # only for mergers\n if len(prgs) > 1:\n i_prgs = [np.where(atree['id'] == i)[0] for i in prgs]\n mass = []\n for iprg in i_prgs:\n mass.append(atree['m'])\n else:\n print(\"This is not a merger\")\n return 0\n\n\ndef merger_properties_main_prg(atree, idx):\n \"\"\"\n Calculate merger mass ratio for \"one\" merger event.\n\n if idx == None:\n if nout == None:\n print(\"Both idx and nout are missing\")\n return\n else:\n if nout == None:\n nout = np.where(atree['id'] == idx)[0]\n\n idx = atree['id'][ind]\n \"\"\"\n\n #prgs = get_progenitors(atree, idx)\n #if len(prgs) > 1:\n # i_prgs = [np.where(atree['id'] == i)[0] for i in prgs]\n\n i_prgs = np.where(atree['desc_id'] == idx)[0]\n\n print(i_prgs)\n id_prgs = atree['id'][i_prgs]\n mass_prgs = atree['m'][i_prgs]\n\n #mass_prgs_norm = mass_prgs / sum(mass_prgs)\n\n return mass_prgs\n\n \ndef load_tree(wdir, is_gal=False, no_dump=False):\n import pickle\n from tree import treemodule\n import tree.ctutils as ctu\n\n alltrees = treemodule.CTree()\n\n\n if is_gal:\n # Galaxy tree\n tree_path = 'GalaxyMaker/Trees/'\n else:\n # halo tree\n tree_path = 'halo/Trees/'\n\n try:\n alltrees = pickle.load(open(wdir + tree_path + \"extended_tree.pickle\", \"rb\" ))\n print(\"Loaded an extended tree\")\n except:\n alltrees = treemodule.CTree()\n alltrees.load(filename= wdir + tree_path + 'tree_0_0_0.dat')\n if not no_dump:\n # Fix nout -----------------------------------------------------\n nout_max = alltrees.data['nout'].max()\n alltrees.data['nout'] += 187 - nout_max\n print(\"------ NOUT fixed\")\n alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal)\n print(\"------ tree data extended\")\n\n return alltrees",
"_____no_output_____"
]
],
[
[
"##### merger epochs",
"_____no_output_____"
]
],
[
[
"# multi page PDF\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfig, ax = plt.subplots(2, sharex=True)\nplt.subplots_adjust(hspace=0.001)\nwith PdfPages('multipage_pdf.pdf') as pdf:\n for i in inds[0:3]:\n gal = mpgs[i]\n ax[0].scatter(gal.nouts, np.log10(gal.data['mstar']))\n ax[0].set_xlim([50,190])\n #ax[0].set_ylim([8,13])\n ax[1].plot(gal.nouts, gal.data['lambda_r'], 'r-')\n #ax[1].set_xlim([50,190])\n #ax[1].set_ylim([0,1])\n #print(gal.merger.nout, gal.merger.mr)\n for mr, xx in zip(gal.merger.mr, gal.merger.nout):\n ax[0].axvline(xx, linestyle=':')\n ax[0].annotate(\"{:.1f}\".format(mr), xy=(xx,0.8))\n ax[1].axvline(xx, linestyle=':')\n ax[1].annotate(\"{:.1f}\".format(mr), xy=(xx,0.8))\n pdf.savefig()\n ax[0].clear()\n ax[1].clear()\n plt.close()\n\n#plt.show()\n",
"_____no_output_____"
]
],
[
[
"1. 하나의 은하를 골라서 모든 머저를 표시하고 minor merger와 major merger중 어느게 더 전체 delta lambda에 기여를 많이 하는지 확인. \n2. 모든 은하에 대해서, major merger가 많은 시기와 delta lambda가 큰 시기가 일치하는지 확인\n3. 은하단 계산 추가로 시작. (BCG를 10kpc, 15kpc 등으로 고정한 것도 추가)\n4. 나중에 트리가 바뀔 수도 있음. ",
"_____no_output_____"
]
],
[
[
"inds[0]",
"_____no_output_____"
],
[
"#%%\nimport matplotlib.pyplot as plt\n# plot each galaxy.\n# stellar mass growth and lambda_r as a function of time.\n\n# The exponent (also called ass \"offset\") in the figure (1e11)\n# overlaps with lookback time tick labels.\n# And moving the offset around is not easy. \n# So, manually divide the values.\n\n# compile mstar and lambda\nmstar = []\nlstar = []\nfor gal in mpgs:\n ind = np.where(gal.data['lambda_r'] > 0)[0]\n mstar = np.average(gal.data['mstar'][ind[-5:]])\n lstar = np.average(gal.data['lambda_r'][ind[-5:]])\n\nmm = mstar/1e10\n\nplt.close()\nplt.ioff()\n\ndef make_patch_spines_invisible(ax):\n \"\"\"\n Useful for plotting multiple variables (more than two twinx())\n \"\"\"\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n #for sp in ax.spines.itervalues(): \n # Changed in Python3\n for sp in ax.spines.values():\n sp.set_visible(False)\n\nfor i, idgal in enumerate(cat['final_gal']):\n#for i, idgal in enumerate([1618]):\n if mm[i][0] < 0.2 :\n print(idgal, mm[i][0], mm[i][-1])\n continue\n print(idgal, \"!!!!\")\n\n plt.rcParams[\"figure.figsize\"] = [12,10]\n fig, axes = plt.subplots(3)\n# plt.figure(num=1, figsize=[10,20])\n fig.suptitle(\"ID: \" + str(idgal).zfill(5), fontsize=18)#, y=1.01)\n lns1 = axes[0].plot(nouts[::-1], l_r[i], label=r\"$\\lambda_{R}$\")\n axes[0].set_xticks(z_pos)\n axes[0].set_xticklabels(z_target_str)\n plt.subplots_adjust(left = 0.1, right = 0.9, \\\n wspace = 0.1, hspace = 0.0, \\\n bottom = 0.1, top = 0.85)\n \n axes[0].set_xlim([37,187])\n axes[0].set_ylim([0,1.0])\n axes[0].set_ylabel(r\"$\\lambda_{R}$\")\n axes[0].set_xlabel(\"redshift\")\n \n# ax2 = axes[0].twinx()\n lns2 = axes[1].plot(nouts[::-1], mm[i], 'r-', label=\"stellar mass\")\n axes[1].set_ylim([0, 1.3*max(mm[i])])\n axes[1].set_xlim([37,187])\n axes[1].set_ylabel(r\"Stellar mass $[10^{10}M_{\\odot}]$\")\n axes[1].get_yaxis().get_offset_text().set_y(1)\n \n# ax3 = ax1.twinx() # Reff\n# ax3.spines[\"right\"].set_position((\"axes\", 1.2))\n# make_patch_spines_invisible(ax3)\n # Second, show the right spine.\n# ax3.spines[\"right\"].set_visible(True)\n axes[2].set_ylabel(\"Reff [kpc]\")\n axes[2].set_xlim([37,187])\n lns3 = axes[2].plot(nouts[::-1], reff[i], 'g-', label='Reff')\n\n # hide x axes so that subplots stick together.\n plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)\n \n ax4 = axes[0].twiny()\n ax4.set_xlabel(\"Lookback time\", labelpad=10)\n ax4.set_xticks(lbt_pos)\n ax4.set_xticklabels(lbt_target_str)\n lns = lns1+lns2+lns3\n labs = [l.get_label() for l in lns]\n axes[0].legend(lns, labs, loc=0)\n # logend location codes:\n # 0 ~ 10 \n # best, ur, ul, lr, ll, r, cl, cr, lower c, upper c, center\n #\n \n# plt.show()\n plt.savefig(wdir + 'catalog/' + str(idgal).zfill(5) + '.png')\n plt.close()",
"_____no_output_____"
],
[
"cat = pickle.load(open('./10002/catalog_GM/catalog105.pickle', 'rb'))",
"_____no_output_____"
],
[
"gg = mpgs[0]",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7531418ad770b2cbb3d41153237b21c9dd3b3a5 | 71,300 | ipynb | Jupyter Notebook | Course5/Week1/Dinosaurus_Island_Character_level_language_model_final_v3a.ipynb | pranavkantgaur/CourseraDLSpecialization | 6e76df71ab40cccb9762282f95531ef9d541a27f | [
"MIT"
] | null | null | null | Course5/Week1/Dinosaurus_Island_Character_level_language_model_final_v3a.ipynb | pranavkantgaur/CourseraDLSpecialization | 6e76df71ab40cccb9762282f95531ef9d541a27f | [
"MIT"
] | null | null | null | Course5/Week1/Dinosaurus_Island_Character_level_language_model_final_v3a.ipynb | pranavkantgaur/CourseraDLSpecialization | 6e76df71ab40cccb9762282f95531ef9d541a27f | [
"MIT"
] | null | null | null | 44.340796 | 1,599 | 0.579229 | [
[
[
"# Character level language model - Dinosaurus Island\n\nWelcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely! \n\n<table>\n<td>\n<img src=\"images/dino.jpg\" style=\"width:250;height:300px;\">\n\n</td>\n\n</table>\n\nLuckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this [dataset](dinos.txt). (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath! \n\nBy completing this assignment you will learn:\n\n- How to store text data for processing using an RNN \n- How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit\n- How to build a character-level text generation recurrent neural network\n- Why clipping the gradients is important\n\nWe will begin by loading in some functions that we have provided for you in `rnn_utils`. Specifically, you have access to functions such as `rnn_forward` and `rnn_backward` which are equivalent to those you've implemented in the previous assignment. ",
"_____no_output_____"
],
[
"## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"3a\".\n* You can find your original work saved in the notebook with the previous version name (\"v3\") \n* To view the file directory, go to the menu \"File->Open\", and this will open a new tab that shows the file directory.\n\n#### List of updates\n* Sort and print `chars` list of characters.\n* Import and use pretty print\n* `clip`: \n - Additional details on why we need to use the \"out\" parameter.\n - Modified for loop to have students fill in the correct items to loop through.\n - Added a test case to check for hard-coding error.\n* `sample`\n - additional hints added to steps 1,2,3,4.\n - \"Using 2D arrays instead of 1D arrays\".\n - explanation of numpy.ravel().\n - fixed expected output.\n - clarified comments in the code.\n* \"training the model\"\n - Replaced the sample code with explanations for how to set the index, X and Y (for a better learning experience).\n* Spelling, grammar and wording corrections.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom utils import *\nimport random\nimport pprint",
"_____no_output_____"
]
],
[
[
"## 1 - Problem Statement\n\n### 1.1 - Dataset and Preprocessing\n\nRun the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size. ",
"_____no_output_____"
]
],
[
[
"data = open('dinos.txt', 'r').read()\ndata= data.lower()\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))",
"There are 19909 total characters and 27 unique characters in your data.\n"
]
],
[
[
"\n* The characters are a-z (26 characters) plus the \"\\n\" (or newline character).\n* In this assignment, the newline character \"\\n\" plays a role similar to the `<EOS>` (or \"End of sentence\") token we had discussed in lecture. \n - Here, \"\\n\" indicates the end of the dinosaur name rather than the end of a sentence. \n* `char_to_ix`: In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26.\n* `ix_to_char`: We also create a second python dictionary that maps each index back to the corresponding character. \n - This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. ",
"_____no_output_____"
]
],
[
[
"chars = sorted(chars)\nprint(chars)",
"['\\n', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n"
],
[
"char_to_ix = { ch:i for i,ch in enumerate(chars) }\nix_to_char = { i:ch for i,ch in enumerate(chars) }\npp = pprint.PrettyPrinter(indent=4)\npp.pprint(ix_to_char)",
"{ 0: '\\n',\n 1: 'a',\n 2: 'b',\n 3: 'c',\n 4: 'd',\n 5: 'e',\n 6: 'f',\n 7: 'g',\n 8: 'h',\n 9: 'i',\n 10: 'j',\n 11: 'k',\n 12: 'l',\n 13: 'm',\n 14: 'n',\n 15: 'o',\n 16: 'p',\n 17: 'q',\n 18: 'r',\n 19: 's',\n 20: 't',\n 21: 'u',\n 22: 'v',\n 23: 'w',\n 24: 'x',\n 25: 'y',\n 26: 'z'}\n"
]
],
[
[
"### 1.2 - Overview of the model\n\nYour model will have the following structure: \n\n- Initialize parameters \n- Run the optimization loop\n - Forward propagation to compute the loss function\n - Backward propagation to compute the gradients with respect to the loss function\n - Clip the gradients to avoid exploding gradients\n - Using the gradients, update your parameters with the gradient descent update rule.\n- Return the learned parameters \n \n<img src=\"images/rnn.png\" style=\"width:450;height:300px;\">\n<caption><center> **Figure 1**: Recurrent Neural Network, similar to what you had built in the previous notebook \"Building a Recurrent Neural Network - Step by Step\". </center></caption>\n\n* At each time-step, the RNN tries to predict what is the next character given the previous characters. \n* The dataset $\\mathbf{X} = (x^{\\langle 1 \\rangle}, x^{\\langle 2 \\rangle}, ..., x^{\\langle T_x \\rangle})$ is a list of characters in the training set.\n* $\\mathbf{Y} = (y^{\\langle 1 \\rangle}, y^{\\langle 2 \\rangle}, ..., y^{\\langle T_x \\rangle})$ is the same list of characters but shifted one character forward. \n* At every time-step $t$, $y^{\\langle t \\rangle} = x^{\\langle t+1 \\rangle}$. The prediction at time $t$ is the same as the input at time $t + 1$.",
"_____no_output_____"
],
[
"## 2 - Building blocks of the model\n\nIn this part, you will build two important blocks of the overall model:\n- Gradient clipping: to avoid exploding gradients\n- Sampling: a technique used to generate characters\n\nYou will then apply these two functions to build the model.",
"_____no_output_____"
],
[
"### 2.1 - Clipping the gradients in the optimization loop\n\nIn this section you will implement the `clip` function that you will call inside of your optimization loop. \n\n#### Exploding gradients\n* When gradients are very large, they're called \"exploding gradients.\" \n* Exploding gradients make the training process more difficult, because the updates may be so large that they \"overshoot\" the optimal values during back propagation.\n\nRecall that your overall loop structure usually consists of:\n* forward pass, \n* cost computation, \n* backward pass, \n* parameter update. \n\nBefore updating the parameters, you will perform gradient clipping to make sure that your gradients are not \"exploding.\"\n\n#### gradient clipping\nIn the exercise below, you will implement a function `clip` that takes in a dictionary of gradients and returns a clipped version of gradients if needed. \n* There are different ways to clip gradients.\n* We will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. \n* For example, if the N=10\n - The range is [-10, 10]\n - If any component of the gradient vector is greater than 10, it is set to 10.\n - If any component of the gradient vector is less than -10, it is set to -10. \n - If any components are between -10 and 10, they keep their original values.\n\n<img src=\"images/clip.png\" style=\"width:400;height:150px;\">\n<caption><center> **Figure 2**: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into \"exploding gradient\" problems. </center></caption>\n\n**Exercise**: \nImplement the function below to return the clipped gradients of your dictionary `gradients`. \n* Your function takes in a maximum threshold and returns the clipped versions of the gradients. \n* You can check out [numpy.clip](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html). \n - You will need to use the argument \"`out = ...`\".\n - Using the \"`out`\" parameter allows you to update a variable \"in-place\".\n - If you don't use \"`out`\" argument, the clipped variable is stored in the variable \"gradient\" but does not update the gradient variables `dWax`, `dWaa`, `dWya`, `db`, `dby`.",
"_____no_output_____"
]
],
[
[
"### GRADED FUNCTION: clip\n\ndef clip(gradients, maxValue):\n '''\n Clips the gradients' values between minimum and maximum.\n \n Arguments:\n gradients -- a dictionary containing the gradients \"dWaa\", \"dWax\", \"dWya\", \"db\", \"dby\"\n maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue\n \n Returns: \n gradients -- a dictionary with the clipped gradients.\n '''\n \n dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']\n \n ### START CODE HERE ###\n # clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)\n for gradient in [dWax, dWaa, dWya, db, dby]:\n np.clip(gradient, -maxValue ,maxValue, out=gradient)\n ### END CODE HERE ###\n \n gradients = {\"dWaa\": dWaa, \"dWax\": dWax, \"dWya\": dWya, \"db\": db, \"dby\": dby}\n \n return gradients",
"_____no_output_____"
],
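[
"# Optional illustration (not part of the graded exercise): why the `out=` argument matters.\n# By default np.clip returns a new array and leaves its input untouched; passing `out=`\n# makes it overwrite the array in place, which is what the clip() function above relies on.\n# The variable names below are only for this demo.\ndemo_grad = np.array([-12., 3., 42.])\nreturned_copy = np.clip(demo_grad, -10, 10)    # demo_grad itself is unchanged here\nprint('before out= :', demo_grad)\nnp.clip(demo_grad, -10, 10, out=demo_grad)     # now demo_grad is clipped in place\nprint('after  out= :', demo_grad)",
"_____no_output_____"
],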
[
"# Test with a maxvalue of 10\nmaxValue = 10\nnp.random.seed(3)\ndWax = np.random.randn(5,3)*10\ndWaa = np.random.randn(5,5)*10\ndWya = np.random.randn(2,5)*10\ndb = np.random.randn(5,1)*10\ndby = np.random.randn(2,1)*10\ngradients = {\"dWax\": dWax, \"dWaa\": dWaa, \"dWya\": dWya, \"db\": db, \"dby\": dby}\ngradients = clip(gradients, maxValue)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])",
"gradients[\"dWaa\"][1][2] = 10.0\ngradients[\"dWax\"][3][1] = -10.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 10.]\ngradients[\"dby\"][1] = [ 8.45833407]\n"
]
],
[
[
"** Expected output:**\n\n```Python\ngradients[\"dWaa\"][1][2] = 10.0\ngradients[\"dWax\"][3][1] = -10.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 10.]\ngradients[\"dby\"][1] = [ 8.45833407]\n```",
"_____no_output_____"
]
],
[
[
"# Test with a maxValue of 5\nmaxValue = 5\nnp.random.seed(3)\ndWax = np.random.randn(5,3)*10\ndWaa = np.random.randn(5,5)*10\ndWya = np.random.randn(2,5)*10\ndb = np.random.randn(5,1)*10\ndby = np.random.randn(2,1)*10\ngradients = {\"dWax\": dWax, \"dWaa\": dWaa, \"dWya\": dWya, \"db\": db, \"dby\": dby}\ngradients = clip(gradients, maxValue)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])",
"gradients[\"dWaa\"][1][2] = 5.0\ngradients[\"dWax\"][3][1] = -5.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 5.]\ngradients[\"dby\"][1] = [ 5.]\n"
]
],
[
[
"** Expected Output: **\n```Python\ngradients[\"dWaa\"][1][2] = 5.0\ngradients[\"dWax\"][3][1] = -5.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 5.]\ngradients[\"dby\"][1] = [ 5.]\n```",
"_____no_output_____"
],
[
"### 2.2 - Sampling\n\nNow assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:\n\n<img src=\"images/dinos3.png\" style=\"width:500;height:300px;\">\n<caption><center> **Figure 3**: In this picture, we assume the model is already trained. We pass in $x^{\\langle 1\\rangle} = \\vec{0}$ at the first time step, and have the network sample one character at a time. </center></caption>",
"_____no_output_____"
],
[
"**Exercise**: Implement the `sample` function below to sample characters. You need to carry out 4 steps:\n\n- **Step 1**: Input the \"dummy\" vector of zeros $x^{\\langle 1 \\rangle} = \\vec{0}$. \n - This is the default input before we've generated any characters. \n We also set $a^{\\langle 0 \\rangle} = \\vec{0}$",
"_____no_output_____"
],
[
"- **Step 2**: Run one step of forward propagation to get $a^{\\langle 1 \\rangle}$ and $\\hat{y}^{\\langle 1 \\rangle}$. Here are the equations:\n\nhidden state: \n$$ a^{\\langle t+1 \\rangle} = \\tanh(W_{ax} x^{\\langle t+1 \\rangle } + W_{aa} a^{\\langle t \\rangle } + b)\\tag{1}$$\n\nactivation:\n$$ z^{\\langle t + 1 \\rangle } = W_{ya} a^{\\langle t + 1 \\rangle } + b_y \\tag{2}$$\n\nprediction:\n$$ \\hat{y}^{\\langle t+1 \\rangle } = softmax(z^{\\langle t + 1 \\rangle })\\tag{3}$$\n\n- Details about $\\hat{y}^{\\langle t+1 \\rangle }$:\n - Note that $\\hat{y}^{\\langle t+1 \\rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). \n - $\\hat{y}^{\\langle t+1 \\rangle}_i$ represents the probability that the character indexed by \"i\" is the next character. \n - We have provided a `softmax()` function that you can use.",
"_____no_output_____"
],
[
"#### Additional Hints\n\n- $x^{\\langle 1 \\rangle}$ is `x` in the code. When creating the one-hot vector, make a numpy array of zeros, with the number of rows equal to the number of unique characters, and the number of columns equal to one. It's a 2D and not a 1D array.\n- $a^{\\langle 0 \\rangle}$ is `a_prev` in the code. It is a numpy array of zeros, where the number of rows is $n_{a}$, and number of columns is 1. It is a 2D array as well. $n_{a}$ is retrieved by getting the number of columns in $W_{aa}$ (the numbers need to match in order for the matrix multiplication $W_{aa}a^{\\langle t \\rangle}$ to work.\n- [numpy.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html)\n- [numpy.tanh](https://docs.scipy.org/doc/numpy/reference/generated/numpy.tanh.html)",
"_____no_output_____"
],
[
"#### Using 2D arrays instead of 1D arrays\n* You may be wondering why we emphasize that $x^{\\langle 1 \\rangle}$ and $a^{\\langle 0 \\rangle}$ are 2D arrays and not 1D vectors.\n* For matrix multiplication in numpy, if we multiply a 2D matrix with a 1D vector, we end up with with a 1D array.\n* This becomes a problem when we add two arrays where we expected them to have the same shape.\n* When two arrays with a different number of dimensions are added together, Python \"broadcasts\" one across the other.\n* Here is some sample code that shows the difference between using a 1D and 2D array.",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"matrix1 = np.array([[1,1],[2,2],[3,3]]) # (3,2)\nmatrix2 = np.array([[0],[0],[0]]) # (3,1) \nvector1D = np.array([1,1]) # (2,) \nvector2D = np.array([[1],[1]]) # (2,1)\nprint(\"matrix1 \\n\", matrix1,\"\\n\")\nprint(\"matrix2 \\n\", matrix2,\"\\n\")\nprint(\"vector1D \\n\", vector1D,\"\\n\")\nprint(\"vector2D \\n\", vector2D)",
"matrix1 \n [[1 1]\n [2 2]\n [3 3]] \n\nmatrix2 \n [[0]\n [0]\n [0]] \n\nvector1D \n [1 1] \n\nvector2D \n [[1]\n [1]]\n"
],
[
"print(\"Multiply 2D and 1D arrays: result is a 1D array\\n\", \n np.dot(matrix1,vector1D))\nprint(\"Multiply 2D and 2D arrays: result is a 2D array\\n\", \n np.dot(matrix1,vector2D))",
"Multiply 2D and 1D arrays: result is a 1D array\n [2 4 6]\nMultiply 2D and 2D arrays: result is a 2D array\n [[2]\n [4]\n [6]]\n"
],
[
"print(\"Adding (3 x 1) vector to a (3 x 1) vector is a (3 x 1) vector\\n\",\n \"This is what we want here!\\n\", \n np.dot(matrix1,vector2D) + matrix2)",
"Adding (3 x 1) vector to a (3 x 1) vector is a (3 x 1) vector\n This is what we want here!\n [[2]\n [4]\n [6]]\n"
],
[
"print(\"Adding a (3,) vector to a (3 x 1) vector\\n\",\n \"broadcasts the 1D array across the second dimension\\n\",\n \"Not what we want here!\\n\",\n np.dot(matrix1,vector1D) + matrix2\n )",
"Adding a (3,) vector to a (3 x 1) vector\n broadcasts the 1D array across the second dimension\n Not what we want here!\n [[2 4 6]\n [2 4 6]\n [2 4 6]]\n"
]
],
[
[
"- **Step 3**: Sampling: \n - Now that we have $y^{\\langle t+1 \\rangle}$, we want to select the next letter in the dinosaur name. If we select the most probable, the model will always generate the same result given a starting letter. \n - To make the results more interesting, we will use np.random.choice to select a next letter that is likely, but not always the same.\n - Sampling is the selection of a value from a group of values, where each value has a probability of being picked. \n - Sampling allows us to generate random sequences of values.\n - Pick the next character's index according to the probability distribution specified by $\\hat{y}^{\\langle t+1 \\rangle }$. \n - This means that if $\\hat{y}^{\\langle t+1 \\rangle }_i = 0.16$, you will pick the index \"i\" with 16% probability. \n - You can use [np.random.choice](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.choice.html).\n\n Example of how to use `np.random.choice()`:\n ```python\n np.random.seed(0)\n probs = np.array([0.1, 0.0, 0.7, 0.2])\n idx = np.random.choice([0, 1, 2, 3] p = probs)\n ```\n - This means that you will pick the index (`idx`) according to the distribution: \n\n $P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.\n\n - Note that the value that's set to `p` should be set to a 1D vector.\n - Also notice that $\\hat{y}^{\\langle t+1 \\rangle}$, which is `y` in the code, is a 2D array.",
"_____no_output_____"
],
[
"##### Additional Hints\n- [range](https://docs.python.org/3/library/functions.html#func-range)\n- [numpy.ravel](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html) takes a multi-dimensional array and returns its contents inside of a 1D vector.\n```Python\narr = np.array([[1,2],[3,4]])\nprint(\"arr\")\nprint(arr)\nprint(\"arr.ravel()\")\nprint(arr.ravel())\n```\nOutput:\n```Python\narr\n[[1 2]\n [3 4]]\narr.ravel()\n[1 2 3 4]\n```\n\n- Note that `append` is an \"in-place\" operation. In other words, don't do this:\n```Python\nfun_hobbies = fun_hobbies.append('learning') ## Doesn't give you what you want\n```",
"_____no_output_____"
],
[
"- **Step 4**: Update to $x^{\\langle t \\rangle }$ \n - The last step to implement in `sample()` is to update the variable `x`, which currently stores $x^{\\langle t \\rangle }$, with the value of $x^{\\langle t + 1 \\rangle }$. \n - You will represent $x^{\\langle t + 1 \\rangle }$ by creating a one-hot vector corresponding to the character that you have chosen as your prediction. \n - You will then forward propagate $x^{\\langle t + 1 \\rangle }$ in Step 1 and keep repeating the process until you get a \"\\n\" character, indicating that you have reached the end of the dinosaur name. ",
"_____no_output_____"
],
[
"##### Additional Hints\n- In order to reset `x` before setting it to the new one-hot vector, you'll want to set all the values to zero.\n - You can either create a new numpy array: [numpy.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html)\n - Or fill all values with a single number: [numpy.ndarray.fill](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.fill.html)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: sample\n\ndef sample(parameters, char_to_ix, seed):\n \"\"\"\n Sample a sequence of characters according to a sequence of probability distributions output of the RNN\n\n Arguments:\n parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b. \n char_to_ix -- python dictionary mapping each character to an index.\n seed -- used for grading purposes. Do not worry about it.\n\n Returns:\n indices -- a list of length n containing the indices of the sampled characters.\n \"\"\"\n \n # Retrieve parameters and relevant shapes from \"parameters\" dictionary\n Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']\n vocab_size = by.shape[0]\n n_a = Waa.shape[1]\n \n ### START CODE HERE ###\n # Step 1: Create the a zero vector x that can be used as the one-hot vector \n # representing the first character (initializing the sequence generation). (≈1 line)\n x = np.zeros((vocab_size, 1))\n # Step 1': Initialize a_prev as zeros (≈1 line)\n a_prev = np.zeros((n_a, 1))\n \n # Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)\n indices = []\n \n # idx is the index of the one-hot vector x that is set to 1\n # All other positions in x are zero.\n # We will initialize idx to -1\n idx = -1 \n \n # Loop over time-steps t. At each time-step:\n # sample a character from a probability distribution \n # and append its index (`idx`) to the list \"indices\". \n # We'll stop if we reach 50 characters \n # (which should be very unlikely with a well trained model).\n # Setting the maximum number of characters helps with debugging and prevents infinite loops. \n counter = 0\n newline_character = char_to_ix['\\n']\n \n while (idx != newline_character and counter != 50):\n \n # Step 2: Forward propagate x using the equations (1), (2) and (3)\n a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)\n z = np.dot(Wya, a) + by\n y = softmax(z)\n \n # for grading purposes\n np.random.seed(counter+seed) \n \n # Step 3: Sample the index of a character within the vocabulary from the probability distribution y\n # (see additional hints above)\n idx = np.random.choice(vocab_size, p = y.ravel())\n\n # Append the index to \"indices\"\n indices.append(idx)\n \n # Step 4: Overwrite the input x with one that corresponds to the sampled index `idx`.\n # (see additional hints above)\n x = np.zeros((vocab_size, 1))\n x[idx] = 1\n \n # Update \"a_prev\" to be \"a\"\n a_prev = a\n \n # for grading purposes\n seed += 1\n counter +=1\n \n ### END CODE HERE ###\n\n if (counter == 50):\n indices.append(char_to_ix['\\n'])\n \n return indices",
"_____no_output_____"
],
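[
"# Optional sanity check (not part of the graded exercise) of the sampling idea from Step 3.\n# `probs` below is a made-up probability column vector, standing in for the softmax output y;\n# it is only an assumption for this demo, not something computed by the model.\nprobs = np.array([[0.1], [0.0], [0.7], [0.2]])   # 2D column vector, like y in sample()\ndraws = [np.random.choice(4, p=probs.ravel()) for _ in range(10)]\nprint(draws)   # index 2 should appear most often, since it has probability 0.7",
"_____no_output_____"
],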
[
"np.random.seed(2)\n_, n_a = 20, 100\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\n\n\nindices = sample(parameters, char_to_ix, 0)\nprint(\"Sampling:\")\nprint(\"list of sampled indices:\\n\", indices)\nprint(\"list of sampled characters:\\n\", [ix_to_char[i] for i in indices])",
"Sampling:\nlist of sampled indices:\n [12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 17, 24, 12, 13, 24, 0]\nlist of sampled characters:\n ['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'q', 'x', 'l', 'm', 'x', '\\n']\n"
]
],
[
[
"** Expected output:**\n\n```Python\nSampling:\nlist of sampled indices:\n [12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 17, 24, 12, 13, 24, 0]\nlist of sampled characters:\n ['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'q', 'x', 'l', 'm', 'x', '\\n']\n```\n\n* Please note that over time, if there are updates to the back-end of the Coursera platform (that may update the version of numpy), the actual list of sampled indices and sampled characters may change. \n* If you follow the instructions given above and get an output without errors, it's possible the routine is correct even if your output doesn't match the expected output. Submit your assignment to the grader to verify its correctness.",
"_____no_output_____"
],
[
"## 3 - Building the language model \n\nIt is time to build the character-level language model for text generation. \n\n\n### 3.1 - Gradient descent \n\n* In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). \n* You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. \n\nAs a reminder, here are the steps of a common optimization loop for an RNN:\n\n- Forward propagate through the RNN to compute the loss\n- Backward propagate through time to compute the gradients of the loss with respect to the parameters\n- Clip the gradients\n- Update the parameters using gradient descent \n\n**Exercise**: Implement the optimization process (one step of stochastic gradient descent). \n\nThe following functions are provided:\n\n```python\ndef rnn_forward(X, Y, a_prev, parameters):\n \"\"\" Performs the forward propagation through the RNN and computes the cross-entropy loss.\n It returns the loss' value as well as a \"cache\" storing values to be used in backpropagation.\"\"\"\n ....\n return loss, cache\n \ndef rnn_backward(X, Y, parameters, cache):\n \"\"\" Performs the backward propagation through time to compute the gradients of the loss with respect\n to the parameters. It returns also all the hidden states.\"\"\"\n ...\n return gradients, a\n\ndef update_parameters(parameters, gradients, learning_rate):\n \"\"\" Updates parameters using the Gradient Descent Update Rule.\"\"\"\n ...\n return parameters\n```\n\nRecall that you previously implemented the `clip` function:\n\n```Python\ndef clip(gradients, maxValue)\n \"\"\"Clips the gradients' values between minimum and maximum.\"\"\"\n ...\n return gradients\n```",
"_____no_output_____"
],
[
"#### parameters\n\n* Note that the weights and biases inside the `parameters` dictionary are being updated by the optimization, even though `parameters` is not one of the returned values of the `optimize` function. The `parameters` dictionary is passed by reference into the function, so changes to this dictionary are making changes to the `parameters` dictionary even when accessed outside of the function.\n* Python dictionaries and lists are \"pass by reference\", which means that if you pass a dictionary into a function and modify the dictionary within the function, this changes that same dictionary (it's not a copy of the dictionary).",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: optimize\n\ndef optimize(X, Y, a_prev, parameters, learning_rate = 0.01):\n \"\"\"\n Execute one step of the optimization to train the model.\n \n Arguments:\n X -- list of integers, where each integer is a number that maps to a character in the vocabulary.\n Y -- list of integers, exactly the same as X but shifted one index to the left.\n a_prev -- previous hidden state.\n parameters -- python dictionary containing:\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n b -- Bias, numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n learning_rate -- learning rate for the model.\n \n Returns:\n loss -- value of the loss function (cross-entropy)\n gradients -- python dictionary containing:\n dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)\n dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)\n dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)\n db -- Gradients of bias vector, of shape (n_a, 1)\n dby -- Gradients of output bias vector, of shape (n_y, 1)\n a[len(X)-1] -- the last hidden state, of shape (n_a, 1)\n \"\"\"\n \n ### START CODE HERE ###\n \n # Forward propagate through time (≈1 line)\n loss, cache = rnn_forward(X, Y, a_prev, parameters)\n \n # Backpropagate through time (≈1 line)\n gradients, a = rnn_backward(X, Y, parameters, cache)\n \n # Clip your gradients between -5 (min) and 5 (max) (≈1 line)\n gradients = clip(gradients, 5)\n \n # Update parameters (≈1 line)\n parameters = update_parameters(parameters, gradients, learning_rate)\n \n ### END CODE HERE ###\n \n return loss, gradients, a[len(X)-1]",
"_____no_output_____"
],
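[
"# Optional illustration (not part of the graded exercise) of the 'pass by reference' note above:\n# dictionaries are passed by reference, so updating `params` inside a function also updates the\n# caller's dictionary. That is why `parameters` changes even though optimize() does not return it.\n# The names below are only for this demo.\ndef bump_weight(params):\n    params['W'] = params['W'] + 1.0   # assigning to a key mutates the shared dictionary\n\ndemo_params = {'W': np.zeros((2, 2))}\nbump_weight(demo_params)\nprint(demo_params['W'])   # the update made inside bump_weight is visible out here",
"_____no_output_____"
],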
[
"np.random.seed(1)\nvocab_size, n_a = 27, 100\na_prev = np.random.randn(n_a, 1)\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\nX = [12,3,5,11,22,3]\nY = [4,14,11,22,25, 26]\n\nloss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)\nprint(\"Loss =\", loss)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"np.argmax(gradients[\\\"dWax\\\"]) =\", np.argmax(gradients[\"dWax\"]))\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])\nprint(\"a_last[4] =\", a_last[4])",
"Loss = 126.503975722\ngradients[\"dWaa\"][1][2] = 0.194709315347\nnp.argmax(gradients[\"dWax\"]) = 93\ngradients[\"dWya\"][1][2] = -0.007773876032\ngradients[\"db\"][4] = [-0.06809825]\ngradients[\"dby\"][1] = [ 0.01538192]\na_last[4] = [-1.]\n"
]
],
[
[
"** Expected output:**\n\n```Python\nLoss = 126.503975722\ngradients[\"dWaa\"][1][2] = 0.194709315347\nnp.argmax(gradients[\"dWax\"]) = 93\ngradients[\"dWya\"][1][2] = -0.007773876032\ngradients[\"db\"][4] = [-0.06809825]\ngradients[\"dby\"][1] = [ 0.01538192]\na_last[4] = [-1.]\n```",
"_____no_output_____"
],
[
"### 3.2 - Training the model ",
"_____no_output_____"
],
[
"* Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. \n* Every 100 steps of stochastic gradient descent, you will sample 10 randomly chosen names to see how the algorithm is doing. \n* Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order. \n\n**Exercise**: Follow the instructions and implement `model()`. When `examples[index]` contains one dinosaur name (string), to create an example (X, Y), you can use this:\n\n##### Set the index `idx` into the list of examples\n* Using the for-loop, walk through the shuffled list of dinosaur names in the list \"examples\".\n* If there are 100 examples, and the for-loop increments the index to 100 onwards, think of how you would make the index cycle back to 0, so that we can continue feeding the examples into the model when j is 100, 101, etc.\n* Hint: 101 divided by 100 is zero with a remainder of 1.\n* `%` is the modulus operator in python.\n\n##### Extract a single example from the list of examples\n* `single_example`: use the `idx` index that you set previously to get one word from the list of examples.",
"_____no_output_____"
],
[
"##### Convert a string into a list of characters: `single_example_chars`\n* `single_example_chars`: A string is a list of characters.\n* You can use a list comprehension (recommended over for-loops) to generate a list of characters.\n```Python\nstr = 'I love learning'\nlist_of_chars = [c for c in str]\nprint(list_of_chars)\n```\n\n```\n['I', ' ', 'l', 'o', 'v', 'e', ' ', 'l', 'e', 'a', 'r', 'n', 'i', 'n', 'g']\n```",
"_____no_output_____"
],
[
"##### Convert list of characters to a list of integers: `single_example_ix`\n* Create a list that contains the index numbers associated with each character.\n* Use the dictionary `char_to_ix`\n* You can combine this with the list comprehension that is used to get a list of characters from a string.\n* This is a separate line of code below, to help learners clarify each step in the function.",
"_____no_output_____"
],
[
"##### Create the list of input characters: `X`\n* `rnn_forward` uses the `None` value as a flag to set the input vector as a zero-vector.\n* Prepend the `None` value in front of the list of input characters.\n* There is more than one way to prepend a value to a list. One way is to add two lists together: `['a'] + ['b']`",
"_____no_output_____"
],
[
"##### Get the integer representation of the newline character `ix_newline`\n* `ix_newline`: The newline character signals the end of the dinosaur name.\n - get the integer representation of the newline character `'\\n'`.\n - Use `char_to_ix`",
"_____no_output_____"
],
[
"##### Set the list of labels (integer representation of the characters): `Y`\n* The goal is to train the RNN to predict the next letter in the name, so the labels are the list of characters that are one time step ahead of the characters in the input `X`.\n - For example, `Y[0]` contains the same value as `X[1]` \n* The RNN should predict a newline at the last letter so add ix_newline to the end of the labels. \n - Append the integer representation of the newline character to the end of `Y`.\n - Note that `append` is an in-place operation.\n - It might be easier for you to add two lists together.",
"_____no_output_____"
]
],
[
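[
"# Optional, ungraded walk-through of the index cycling and (X, Y) construction described above.\n# 'tyrannosaurus' is just an example string chosen for this demo; inside model() the word\n# comes from the shuffled `examples` list instead.\nj = 101\nnum_examples = 100\nidx = j % num_examples                       # 101 % 100 == 1, so the index cycles back to 1\nsingle_example = 'tyrannosaurus'\nsingle_example_ix = [char_to_ix[c] for c in single_example]\nX = [None] + single_example_ix               # the leading None makes rnn_forward use a zero input\nY = single_example_ix + [char_to_ix['\\n']]   # labels are X shifted left, ending with newline\nprint(idx, X[:5], Y[:4])",
"_____no_output_____"
],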
[
"# GRADED FUNCTION: model\n\ndef model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):\n \"\"\"\n Trains the model and generates dinosaur names. \n \n Arguments:\n data -- text corpus\n ix_to_char -- dictionary that maps the index to a character\n char_to_ix -- dictionary that maps a character to an index\n num_iterations -- number of iterations to train the model for\n n_a -- number of units of the RNN cell\n dino_names -- number of dinosaur names you want to sample at each iteration. \n vocab_size -- number of unique characters found in the text (size of the vocabulary)\n \n Returns:\n parameters -- learned parameters\n \"\"\"\n \n # Retrieve n_x and n_y from vocab_size\n n_x, n_y = vocab_size, vocab_size\n \n # Initialize parameters\n parameters = initialize_parameters(n_a, n_x, n_y)\n \n # Initialize loss (this is required because we want to smooth our loss)\n loss = get_initial_loss(vocab_size, dino_names)\n \n # Build list of all dinosaur names (training examples).\n with open(\"dinos.txt\") as f:\n examples = f.readlines()\n examples = [x.lower().strip() for x in examples]\n \n # Shuffle list of all dinosaur names\n np.random.seed(0)\n np.random.shuffle(examples)\n \n # Initialize the hidden state of your LSTM\n a_prev = np.zeros((n_a, 1))\n \n # Optimization loop\n for j in range(num_iterations):\n \n ### START CODE HERE ###\n \n # Set the index `idx` (see instructions above)\n idx = j % len(examples)\n \n # Set the input X (see instructions above)\n single_example = examples[idx]\n single_example_chars = [c for c in examples[idx]]\n single_example_ix = [char_to_ix[i] for i in single_example_chars]\n X = [None] + single_example_ix\n \n # Set the labels Y (see instructions above)\n ix_newline = char_to_ix['\\n']\n Y = single_example_ix + [ix_newline]\n \n # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters\n # Choose a learning rate of 0.01\n curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)\n \n ### END CODE HERE ###\n \n # Use a latency trick to keep the loss smooth. It happens here to accelerate the training.\n loss = smooth(loss, curr_loss)\n\n # Every 2000 Iteration, generate \"n\" characters thanks to sample() to check if the model is learning properly\n if j % 2000 == 0:\n \n print('Iteration: %d, Loss: %f' % (j, loss) + '\\n')\n \n # The number of dinosaur names to print\n seed = 0\n for name in range(dino_names):\n \n # Sample indices and print them\n sampled_indices = sample(parameters, char_to_ix, seed)\n print_sample(sampled_indices, ix_to_char)\n \n seed += 1 # To get the same result (for grading purposes), increment the seed by one. \n \n print('\\n')\n \n return parameters",
"_____no_output_____"
]
],
[
[
"Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names. ",
"_____no_output_____"
]
],
[
[
"parameters = model(data, ix_to_char, char_to_ix)",
"Iteration: 0, Loss: 23.087336\n\nNkzxwtdmfqoeyhsqwasjkjvu\nKneb\nKzxwtdmfqoeyhsqwasjkjvu\nNeb\nZxwtdmfqoeyhsqwasjkjvu\nEb\nXwtdmfqoeyhsqwasjkjvu\n\n\nIteration: 2000, Loss: 27.884160\n\nLiusskeomnolxeros\nHmdaairus\nHytroligoraurus\nLecalosapaus\nXusicikoraurus\nAbalpsamantisaurus\nTpraneronxeros\n\n\nIteration: 4000, Loss: 25.901815\n\nMivrosaurus\nInee\nIvtroplisaurus\nMbaaisaurus\nWusichisaurus\nCabaselachus\nToraperlethosdarenitochusthiamamumamaon\n\n\nIteration: 6000, Loss: 24.608779\n\nOnwusceomosaurus\nLieeaerosaurus\nLxussaurus\nOma\nXusteonosaurus\nEeahosaurus\nToreonosaurus\n\n\nIteration: 8000, Loss: 24.070350\n\nOnxusichepriuon\nKilabersaurus\nLutrodon\nOmaaerosaurus\nXutrcheps\nEdaksoje\nTrodiktonus\n\n\nIteration: 10000, Loss: 23.844446\n\nOnyusaurus\nKlecalosaurus\nLustodon\nOla\nXusodonia\nEeaeosaurus\nTroceosaurus\n\n\nIteration: 12000, Loss: 23.291971\n\nOnyxosaurus\nKica\nLustrepiosaurus\nOlaagrraiansaurus\nYuspangosaurus\nEealosaurus\nTrognesaurus\n\n\nIteration: 14000, Loss: 23.382338\n\nMeutromodromurus\nInda\nIutroinatorsaurus\nMaca\nYusteratoptititan\nCa\nTroclosaurus\n\n\nIteration: 16000, Loss: 23.255630\n\nMeustolkanolus\nIndabestacarospceryradwalosaurus\nJustolopinaveraterasauracoptelalenyden\nMaca\nYusocles\nDaahosaurus\nTrodon\n\n\nIteration: 18000, Loss: 22.905483\n\nPhytronn\nMeicanstolanthus\nMustrisaurus\nPegalosaurus\nYuskercis\nEgalosaurus\nTromelosaurus\n\n\nIteration: 20000, Loss: 22.873854\n\nNlyushanerohyisaurus\nLoga\nLustrhigosaurus\nNedalosaurus\nYuslangosaurus\nElagosaurus\nTrrangosaurus\n\n\nIteration: 22000, Loss: 22.710545\n\nOnyxromicoraurospareiosatrus\nLiga\nMustoffankeugoptardoros\nOla\nYusodogongterosaurus\nEhaerona\nTrododongxernochenhus\n\n\nIteration: 24000, Loss: 22.604827\n\nMeustognathiterhucoplithaloptha\nJigaadosaurus\nKurrodon\nMecaistheansaurus\nYuromelosaurus\nEiaeropeeton\nTroenathiteritaus\n\n\nIteration: 26000, Loss: 22.714486\n\nNhyxosaurus\nKola\nLvrosaurus\nNecalosaurus\nYurolonlus\nEjakosaurus\nTroindronykus\n\n\nIteration: 28000, Loss: 22.647640\n\nOnyxosaurus\nLoceahosaurus\nLustleonlonx\nOlabasicachudrakhurgawamosaurus\nYtrojianiisaurus\nEladon\nTromacimathoshargicitan\n\n\nIteration: 30000, Loss: 22.598485\n\nOryuton\nLocaaesaurus\nLustoendosaurus\nOlaahus\nYusaurus\nEhadopldarshuellus\nTroia\n\n\nIteration: 32000, Loss: 22.211861\n\nMeutronlapsaurus\nKracallthcaps\nLustrathus\nMacairugeanosaurus\nYusidoneraverataus\nEialosaurus\nTroimaniathonsaurus\n\n\nIteration: 34000, Loss: 22.447230\n\nOnyxipaledisons\nKiabaeropa\nLussiamang\nPacaeptabalsaurus\nXosalong\nEiacoteg\nTroia\n\n\n"
]
],
[
[
"** Expected Output**\n\nThe output of your model may look different, but it will look something like this:\n\n```Python\nIteration: 34000, Loss: 22.447230\n\nOnyxipaledisons\nKiabaeropa\nLussiamang\nPacaeptabalsaurus\nXosalong\nEiacoteg\nTroia\n```",
"_____no_output_____"
],
[
"## Conclusion\n\nYou can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like `maconucon`, `marloralus` and `macingsersaurus`. Your model hopefully also learned that dinosaur names tend to end in `saurus`, `don`, `aura`, `tor`, etc.\n\nIf your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, `dromaeosauroides` is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest! \n\nThis assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!\n\n<img src=\"images/mangosaurus.jpeg\" style=\"width:250;height:300px;\">",
"_____no_output_____"
],
[
"## 4 - Writing like Shakespeare\n\nThe rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative. \n\nA similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere a sequence can influence what should be a different character much much later in the sequence. These long term dependencies were less important with dinosaur names, since the names were quite short. \n\n\n<img src=\"images/shakespeare.jpg\" style=\"width:500;height:400px;\">\n<caption><center> Let's become poets! </center></caption>\n\nWe have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes. ",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking\nfrom keras.layers import LSTM\nfrom keras.utils.data_utils import get_file\nfrom keras.preprocessing.sequence import pad_sequences\nfrom shakespeare_utils import *\nimport sys\nimport io",
"Using TensorFlow backend.\n"
]
],
[
[
"To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called [*\"The Sonnets\"*](shakespeare.txt). ",
"_____no_output_____"
],
[
"Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run `generate_output`, which will prompt asking you for an input (`<`40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try \"Forsooth this maketh no sense \" (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well. \n",
"_____no_output_____"
]
],
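[
[
"Under the hood, `generate_output` builds the poem one character at a time: it repeatedly encodes the current text, asks the trained model for a probability distribution over the next character, and samples from that distribution. The snippet below is only an illustrative sketch of that loop; it is not the actual helper from `shakespeare_utils`, and the names `Tx`, `chars`, `char_indices` and `indices_char` are assumptions about how the vocabulary is stored.\n\n```python\nimport numpy as np\n\ndef sample_next_char(model, seed_text, temperature=1.0):\n    # One-hot encode the last Tx characters of the seed text (a real implementation pads shorter seeds)\n    x = np.zeros((1, Tx, len(chars)))\n    for t, ch in enumerate(seed_text[-Tx:]):\n        x[0, t, char_indices[ch]] = 1.0\n    # Predict a distribution over the next character\n    preds = model.predict(x, verbose=0)[0]\n    # Re-weight the distribution by the temperature and sample an index\n    preds = np.log(np.maximum(preds, 1e-10)) / temperature\n    probs = np.exp(preds) / np.sum(np.exp(preds))\n    idx = np.random.choice(len(chars), p=probs)\n    return indices_char[idx]\n```",
"_____no_output_____"
]
],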
[
[
"print_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\nmodel.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])",
"Epoch 1/1\n 7552/31412 [======>.......................] - ETA: 203s - loss: 3.2102"
],
[
"# Run this cell to try with different inputs without having to re-train the model \ngenerate_output()",
"_____no_output_____"
]
],
[
[
"The RNN-Shakespeare model is very similar to the one you have built for dinosaur names. The only major differences are:\n- LSTMs instead of the basic RNN to capture longer-range dependencies\n- The model is a deeper, stacked LSTM model (2 layer)\n- Using Keras instead of python to simplify the code \n\nIf you want to learn more, you can also check out the Keras Team's text generation implementation on GitHub: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py.\n\nCongratulations on finishing this notebook! ",
"_____no_output_____"
],
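[
"For reference, a 2-layer stacked character-level LSTM of the kind described above could be defined in Keras roughly as follows. This is only a sketch of the general idea, not the model actually loaded by `shakespeare_utils`; `Tx` (the input sequence length), the vocabulary size `n_values` and the layer sizes are illustrative assumptions.\n\n```python\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense\n\nmodel = Sequential()\n# First LSTM layer returns the full sequence so a second LSTM can be stacked on top\nmodel.add(LSTM(128, input_shape=(Tx, n_values), return_sequences=True))\n# Second LSTM layer returns only its final hidden state\nmodel.add(LSTM(128, return_sequences=False))\n# Softmax over the vocabulary to predict the next character\nmodel.add(Dense(n_values, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n```",
"_____no_output_____"
],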
[
"**References**:\n- This exercise took inspiration from Andrej Karpathy's implementation: https://gist.github.com/karpathy/d4dee566867f8291f086. To learn more about text generation, also check out Karpathy's [blog post](http://karpathy.github.io/2015/05/21/rnn-effectiveness/).\n- For the Shakespearian poem generator, our implementation was based on the implementation of an LSTM text generator by the Keras team: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e753170f56b69026d7d9565c3a0b4b6a77c10abe | 12,412 | ipynb | Jupyter Notebook | _posts/scikit/randomly-generated-classification-dataset/Plot-randomly-generated-classification-dataset.ipynb | bmb804/documentation | 57826d25e0afea7fff6a8da9abab8be2f7a4b48c | [
"CC-BY-3.0"
] | 2 | 2019-06-24T23:55:53.000Z | 2019-07-08T12:22:56.000Z | _posts/scikit/randomly-generated-classification-dataset/Plot-randomly-generated-classification-dataset.ipynb | bmb804/documentation | 57826d25e0afea7fff6a8da9abab8be2f7a4b48c | [
"CC-BY-3.0"
] | 15 | 2020-06-30T21:21:30.000Z | 2021-08-02T21:16:33.000Z | _posts/scikit/randomly-generated-classification-dataset/Plot-randomly-generated-classification-dataset.ipynb | bmb804/documentation | 57826d25e0afea7fff6a8da9abab8be2f7a4b48c | [
"CC-BY-3.0"
] | 1 | 2019-11-10T04:01:48.000Z | 2019-11-10T04:01:48.000Z | 37.841463 | 471 | 0.538108 | [
[
[
"Plot several randomly generated 2D classification datasets. This example illustrates the **datasets.make_classification datasets.make_blobs** and **datasets.make_gaussian_quantiles** functions.\n\nFor `make_classification`, three binary and two multi-class classification datasets are generated, with different numbers of informative features and clusters per class.",
"_____no_output_____"
],
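[
"As a quick illustration of what these generators return (the parameter values here are illustrative, not the ones used in the plots below), `make_classification` produces a feature matrix and a label vector:\n\n```python\nfrom sklearn.datasets import make_classification\n\n# 100 samples, 2 informative features, 1 cluster per class, 2 classes by default\nX, y = make_classification(n_samples=100, n_features=2, n_redundant=0,\n                           n_informative=2, n_clusters_per_class=1)\nprint(X.shape)  # (100, 2)\nprint(y.shape)  # (100,)\n```",
"_____no_output_____"
],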
[
"#### New to Plotly?\nPlotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).\n<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).\n<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!",
"_____no_output_____"
],
[
"### Version",
"_____no_output_____"
]
],
[
[
"import sklearn\nsklearn.__version__",
"_____no_output_____"
]
],
[
[
"### Imports",
"_____no_output_____"
],
[
"This tutorial imports [make_classification](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html#sklearn.datasets.make_classification), [make_blobs](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs) and [make_gaussian_quantiles](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_gaussian_quantiles.html#sklearn.datasets.make_gaussian_quantiles).",
"_____no_output_____"
]
],
[
[
"import plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly import tools\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_gaussian_quantiles",
"_____no_output_____"
]
],
[
[
"### Plot Dataset",
"_____no_output_____"
]
],
[
[
"fig = tools.make_subplots(rows=3, cols=2,\n print_grid=False,\n subplot_titles=(\"One informative feature, one cluster per class\",\n \"Two informative features, one cluster per class\",\n \"Two informative features, two clusters per class\",\n \"Multi-class, two informative features, one cluster\",\n \"Three blobs\",\n \"Gaussian divided into three quantiles\",))",
"_____no_output_____"
],
[
"X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,\n n_clusters_per_class=1)\n\none_informative = go.Scatter(x=X1[:, 0], y=X1[:, 1], \n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\n\nfig.append_trace(one_informative, 1, 1)\n\nX1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,\n n_clusters_per_class=1)\n\ntwo_informative1 = go.Scatter(x=X1[:, 0], y=X1[:, 1],\n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\n\nfig.append_trace(two_informative1, 1, 2)\n\nX2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)\ntwo_informative2 = go.Scatter(x=X2[:, 0], y=X2[:, 1],\n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\n\n\nfig.append_trace(two_informative2, 2, 1)\n\nX1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,\n n_clusters_per_class=1, n_classes=3)\n\nmulticlass = go.Scatter(x=X1[:, 0], y=X1[:, 1],\n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\n\nfig.append_trace(two_informative2, 2, 2)\n\nX1, Y1 = make_blobs(n_features=2, centers=3)\nthree_blobs = go.Scatter(x=X1[:, 0], y=X1[:, 1], \n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\nfig.append_trace(three_blobs, 3, 1)\n\nX1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)\ngaussian = go.Scatter(x=X1[:, 0], y=X1[:, 1], \n mode='markers',\n showlegend=False,\n marker=dict(color=Y1,\n line=dict(color='black', width=1))\n )\nfig.append_trace(gaussian, 3, 2)\n",
"_____no_output_____"
],
[
"fig['layout'].update(height=900)\n\nfor i in map(str, range(1, 7)):\n x = 'xaxis' + i\n y = 'yaxis' + i\n fig['layout'][x].update(zeroline=False, showgrid=False)\n fig['layout'][y].update(zeroline=False, showgrid=False)\n \npy.iplot(fig) ",
"_____no_output_____"
],
[
"from IPython.display import display, HTML\n\ndisplay(HTML('<link href=\"//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700\" rel=\"stylesheet\" type=\"text/css\" />'))\ndisplay(HTML('<link rel=\"stylesheet\" type=\"text/css\" href=\"http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css\">'))\n\n! pip install git+https://github.com/plotly/publisher.git --upgrade\nimport publisher\npublisher.publish(\n 'Plot-randomly-generated-classification-dataset.ipynb', 'scikit-learn/plot-random-dataset/', 'Randomly Generated Classification Dataset| plotly',\n ' ',\n title = 'Randomly Generated Classification Dataset | plotly',\n name = 'Randomly Generated Classification Dataset',\n has_thumbnail='true', thumbnail='thumbnail/random_dataset.jpg', \n language='scikit-learn', page_type='example_index',\n display_as='dataset', order=3,\n ipynb= '~Diksha_Gabha/2904')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |