| Column | Type | Range |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 6–14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6–260 |
| max_stars_repo_name | stringlengths | 6–119 |
| max_stars_repo_head_hexsha | stringlengths | 40–41 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 6–260 |
| max_issues_repo_name | stringlengths | 6–119 |
| max_issues_repo_head_hexsha | stringlengths | 40–41 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 6–260 |
| max_forks_repo_name | stringlengths | 6–119 |
| max_forks_repo_head_hexsha | stringlengths | 40–41 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| avg_line_length | float64 | 2–1.04M |
| max_line_length | int64 | 2–11.2M |
| alphanum_fraction | float64 | 0–1 |
| cells | sequence | |
| cell_types | sequence | |
| cell_type_groups | sequence | |
d056a6752e5b24280499f4a5dec912493a8b0168
10,070
ipynb
Jupyter Notebook
Mediacloud_Hierarchical_clustering.ipynb
gesiscss/media_frames
6c1403c07bd118b11a4f2ab35abd34cc09cb50de
[ "MIT" ]
null
null
null
Mediacloud_Hierarchical_clustering.ipynb
gesiscss/media_frames
6c1403c07bd118b11a4f2ab35abd34cc09cb50de
[ "MIT" ]
null
null
null
Mediacloud_Hierarchical_clustering.ipynb
gesiscss/media_frames
6c1403c07bd118b11a4f2ab35abd34cc09cb50de
[ "MIT" ]
1
2021-09-14T16:21:37.000Z
2021-09-14T16:21:37.000Z
37.296296
153
0.627905
[ [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport ast \nfrom gensim.corpora import Dictionary\nfrom gensim.models.coherencemodel import CoherenceModel\nimport gensim\nfrom sklearn.cluster import AgglomerativeClustering\nfrom scipy.cluster.hierarchy import ward, dendrogram\nfrom sklearn.decomposition import TruncatedSVD\nfrom collections import Counter\nfrom sklearn.manifold import TSNE\nimport matplotlib.cm as cm\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.cluster.hierarchy import ward, dendrogram, fcluster, single, complete\nfrom sklearn.metrics import silhouette_score", "_____no_output_____" ], [ "BIGRAMS = True #a parameter that specifies if unigrams (false) or bigrams (true) are used\n\ndef dummy_fun(doc):\n return doc\n\ncv = CountVectorizer(analyzer='word',\n tokenizer=dummy_fun,\n preprocessor=dummy_fun,\n token_pattern=None)\n\ndef make_bigrams(bigram_mod, texts):\n return [bigram_mod[doc] for doc in texts]\n\ndef read_and_vectorize(path, cv, bigrams):\n df = pd.read_csv(path)\n df['tokens'] = df['tokens'].apply(ast.literal_eval) #transforming string of tokens to list\n if bigrams == True: #specify if bigrams or unigrams are used for future clustering\n bigram = gensim.models.Phrases(df['tokens'], min_count=3, threshold=50) # higher threshold fewer phrases.\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n df['bigrams'] = make_bigrams(bigram_mod, df['tokens'])\n print('Bigrams are created.')\n data = cv.fit_transform(df['bigrams'])\n else:\n data = cv.fit_transform(df['tokens'])\n terms = cv.get_feature_names()\n print(f'Len of terms: {len(terms)}')\n tfidf_transformer = TfidfTransformer()\n tfidf_matrix = tfidf_transformer.fit_transform(data)\n print(f'Tfidf matrix is generated of shape {tfidf_matrix.shape}')\n return df, tfidf_matrix, terms\n\ndf_feb, tfidf_matrix_feb, terms_feb = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_feb_sample.csv', cv, BIGRAMS)\ndf_may, tfidf_matrix_may, terms_may = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_may_sample.csv', cv, BIGRAMS)\ndf_sep, tfidf_matrix_sep, terms_sep = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_sep_sample.csv', cv, BIGRAMS)", "_____no_output_____" ], [ "def read_best_kmeans_model(path):\n models_df = pd.read_csv(path)\n best_model = models_df.iloc[models_df['Coherence'].idxmax()]\n return best_model, models_df\n\nbest_model_feb, models_df_feb = read_best_kmeans_model('preprocessed_results/models_df_feb.csv')\nbest_model_may, models_df_may = read_best_kmeans_model('preprocessed_results/models_df_may.csv')\nbest_model_sep, models_df_sep = read_best_kmeans_model('preprocessed_results/models_df_sep.csv')", "_____no_output_____" ], [ "def transform(tfidf_matrix):\n transformed_tokens = np.empty((tfidf_matrix.shape[0], 0)).tolist()\n for i in range(tfidf_matrix.shape[0]):\n transformed_tokens[i] = tfidf_matrix[i].toarray()[0]\n print(f'Matrix is tranformed into array of len {len(transformed_tokens)}')\n return np.array(transformed_tokens)\n\ndef plot_linkage(linkage_matrix, clusters):\n fig, ax = plt.subplots(figsize=(15, 20)) # set size\n ax = dendrogram(linkage_matrix, orientation=\"right\", labels=clusters)\n\n plt.tick_params(\\\n axis= 'x', # changes apply 
to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off')\n\n plt.tight_layout()\n\ndef hierarchical_clustering(best_model, tfidf_matrix, cluster):\n random_state = 20\n transformed_tokens = transform(tfidf_matrix)\n \n model = KMeans(n_clusters=best_model['Num_Topics'], init='k-means++', max_iter=100, n_init=1, random_state = random_state)\n clusters = model.fit_predict(transformed_tokens)\n selected_features = [transformed_tokens[i] for i in range(len(transformed_tokens)) if clusters[i]==cluster]\n svd = TruncatedSVD(n_components=100, random_state=random_state)\n features = svd.fit_transform(selected_features)\n print(features.shape)\n linkage_matrix = ward(features)\n plot_linkage(linkage_matrix, clusters)\n return features, transformed_tokens, linkage_matrix, clusters\n\nfeatures_hierarchical_feb, transformed_tokens_feb, linkage_matrix_feb, clusters_feb = hierarchical_clustering(best_model_feb, tfidf_matrix_feb, 6)", "_____no_output_____" ], [ "def agglomerative_clustering(n_clusters, features, df, cluster, best_model, transformed_tokens, clusters):\n random_state=20\n model_hierarchical = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n model_hierarchical.fit_predict(features)\n \n# model = KMeans(n_clusters=best_model['Num_Topics'], init='k-means++', max_iter=100, n_init=1, random_state = random_state)\n# clusters = model.fit_predict(transformed_tokens)\n df = df[clusters==cluster]\n \n for label in range(model_hierarchical.n_clusters_):\n print(label)\n display(df[model_hierarchical.labels_==label]['title'])\n \nagglomerative_clustering(23, features_hierarchical_feb, df_feb, 6, best_model_feb, transformed_tokens_feb, clusters_feb)", "_____no_output_____" ], [ "def silhouette_k(distance_matrix, linkage_matrix, max_k=20):\n scores = []\n for i in range(2, max_k+1):\n clusters = fcluster(linkage_matrix, i, criterion='maxclust')\n score = silhouette_score(distance_matrix, clusters, metric='precomputed')\n print(\"Silhouette score with {} clusters:\".format(i), score)\n scores.append(score)\n plt.title(\"Silhouette score vs. number of clusters\")\n plt.xlabel(\"# of clusters\")\n plt.ylabel(\"Score (higher is better)\")\n plt.plot(np.arange(2, max_k+1), scores)\n plt.show()\n return scores\n\ndef elbow_method(tfidf_matrix, linkage_matrix):\n dist = 1 - cosine_similarity(tfidf_matrix)\n dist = dist - dist.min() \n silhouette_k(dist, linkage_matrix, max_k=30)\n \nelbow_method(tfidf_matrix_feb[clusters_feb==6], linkage_matrix_feb)", "_____no_output_____" ] ], [ [ "## May", "_____no_output_____" ] ], [ [ "features_hierarchical_may, transformed_tokens_may, linkage_matrix_may, clusters_may = hierarchical_clustering(best_model_may, tfidf_matrix_may, 2)", "_____no_output_____" ], [ "agglomerative_clustering(6, features_hierarchical_may, df_may, 2, best_model_may, transformed_tokens_may, clusters_may)", "_____no_output_____" ], [ "elbow_method(tfidf_matrix_may[clusters_may==2], linkage_matrix_may)", "_____no_output_____" ] ], [ [ "## September", "_____no_output_____" ] ], [ [ "features_hierarchical_sep, transformed_tokens_sep, linkage_matrix_sep, clusters_sep = hierarchical_clustering(best_model_sep, tfidf_matrix_sep, 10)", "_____no_output_____" ], [ "agglomerative_clustering(2, features_hierarchical_sep, df_sep, 10, best_model_sep, transformed_tokens_sep, clusters_sep)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d056ab6596fca9ab0e81ccf740a02416f41d6171
14,464
ipynb
Jupyter Notebook
week06_policy_based/reinforce_pytorch.ipynb
RomaKoks/Practical_RL
ddcb71f9e45d4e08fe04da6404cb0e312681b615
[ "Unlicense" ]
5,304
2017-01-23T16:26:50.000Z
2022-03-31T21:21:45.000Z
week06_policy_based/reinforce_pytorch.ipynb
RomaKoks/Practical_RL
ddcb71f9e45d4e08fe04da6404cb0e312681b615
[ "Unlicense" ]
451
2017-01-23T20:56:34.000Z
2022-03-21T19:52:19.000Z
week06_policy_based/reinforce_pytorch.ipynb
RomaKoks/Practical_RL
ddcb71f9e45d4e08fe04da6404cb0e312681b615
[ "Unlicense" ]
1,731
2017-01-23T16:23:18.000Z
2022-03-31T20:44:33.000Z
32.285714
257
0.55531
[ [ [ "# REINFORCE in PyTorch\n\nJust like we did before for Q-learning, this time we'll design a PyTorch network to learn `CartPole-v0` via policy gradient (REINFORCE).\n\nMost of the code in this notebook is taken from approximate Q-learning, so you'll find it more or less familiar and even simpler.", "_____no_output_____" ] ], [ [ "import sys, os\nif 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):\n !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash\n !touch .setup_complete\n\n# This code creates a virtual display to draw game images on.\n# It will have no effect if your machine has a monitor.\nif type(os.environ.get(\"DISPLAY\")) is not str or len(os.environ.get(\"DISPLAY\")) == 0:\n !bash ../xvfb start\n os.environ['DISPLAY'] = ':1'", "_____no_output_____" ], [ "import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "A caveat: with some versions of `pyglet`, the following cell may crash with `NameError: name 'base' is not defined`. The corresponding bug report is [here](https://github.com/pyglet/pyglet/issues/134). If you see this error, try restarting the kernel.", "_____no_output_____" ] ], [ [ "env = gym.make(\"CartPole-v0\")\n\n# gym compatibility: unwrap TimeLimit\nif hasattr(env, '_max_episode_steps'):\n env = env.env\n\nenv.reset()\nn_actions = env.action_space.n\nstate_dim = env.observation_space.shape\n\nplt.imshow(env.render(\"rgb_array\"))", "_____no_output_____" ] ], [ [ "# Building the network for REINFORCE", "_____no_output_____" ], [ "For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.\n\nFor numerical stability, please __do not include the softmax layer into your network architecture__.\nWe'll use softmax or log-softmax where appropriate.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn", "_____no_output_____" ], [ "# Build a simple neural network that predicts policy logits. 
\n# Keep it simple: CartPole isn't worth deep architectures.\nmodel = nn.Sequential(\n <YOUR CODE: define a neural network that predicts policy logits>\n)", "_____no_output_____" ] ], [ [ "#### Predict function", "_____no_output_____" ], [ "Note: output value of this function is not a torch tensor, it's a numpy array.\nSo, here gradient calculation is not needed.\n<br>\nUse [no_grad](https://pytorch.org/docs/stable/autograd.html#torch.autograd.no_grad)\nto suppress gradient calculation.\n<br>\nAlso, `.detach()` (or legacy `.data` property) can be used instead, but there is a difference:\n<br>\nWith `.detach()` computational graph is built but then disconnected from a particular tensor,\nso `.detach()` should be used if that graph is needed for backprop via some other (not detached) tensor;\n<br>\nIn contrast, no graph is built by any operation in `no_grad()` context, thus it's preferable here.", "_____no_output_____" ] ], [ [ "def predict_probs(states):\n \"\"\" \n Predict action probabilities given states.\n :param states: numpy array of shape [batch, state_shape]\n :returns: numpy array of shape [batch, n_actions]\n \"\"\"\n # convert states, compute logits, use softmax to get probability\n <YOUR CODE>\n return <YOUR CODE>", "_____no_output_____" ], [ "test_states = np.array([env.reset() for _ in range(5)])\ntest_probas = predict_probs(test_states)\nassert isinstance(test_probas, np.ndarray), \\\n \"you must return np array and not %s\" % type(test_probas)\nassert tuple(test_probas.shape) == (test_states.shape[0], env.action_space.n), \\\n \"wrong output shape: %s\" % np.shape(test_probas)\nassert np.allclose(np.sum(test_probas, axis=1), 1), \"probabilities do not sum to 1\"", "_____no_output_____" ] ], [ [ "### Play the game\n\nWe can now use our newly built agent to play the game.", "_____no_output_____" ] ], [ [ "def generate_session(env, t_max=1000):\n \"\"\" \n Play a full session with REINFORCE agent.\n Returns sequences of states, actions, and rewards.\n \"\"\"\n # arrays to record session\n states, actions, rewards = [], [], []\n s = env.reset()\n\n for t in range(t_max):\n # action probabilities array aka pi(a|s)\n action_probs = predict_probs(np.array([s]))[0]\n\n # Sample action with given probabilities.\n a = <YOUR CODE>\n new_s, r, done, info = env.step(a)\n\n # record session history to train later\n states.append(s)\n actions.append(a)\n rewards.append(r)\n\n s = new_s\n if done:\n break\n\n return states, actions, rewards", "_____no_output_____" ], [ "# test it\nstates, actions, rewards = generate_session(env)", "_____no_output_____" ] ], [ [ "### Computing cumulative rewards\n\n$$\n\\begin{align*}\nG_t &= r_t + \\gamma r_{t + 1} + \\gamma^2 r_{t + 2} + \\ldots \\\\\n&= \\sum_{i = t}^T \\gamma^{i - t} r_i \\\\\n&= r_t + \\gamma * G_{t + 1}\n\\end{align*}\n$$", "_____no_output_____" ] ], [ [ "def get_cumulative_rewards(rewards, # rewards at each step\n gamma=0.99 # discount for reward\n ):\n \"\"\"\n Take a list of immediate rewards r(s,a) for the whole session \n and compute cumulative returns (a.k.a. 
G(s,a) in Sutton '16).\n \n G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...\n\n A simple way to compute cumulative rewards is to iterate from the last\n to the first timestep and compute G_t = r_t + gamma*G_{t+1} recurrently\n\n You must return an array/list of cumulative rewards with as many elements as in the initial rewards.\n \"\"\"\n <YOUR CODE>\n return <YOUR CODE: array of cumulative rewards>", "_____no_output_____" ], [ "get_cumulative_rewards(rewards)\nassert len(get_cumulative_rewards(list(range(100)))) == 100\nassert np.allclose(\n get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9),\n [1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])\nassert np.allclose(\n get_cumulative_rewards([0, 0, 1, -2, 3, -4, 0], gamma=0.5),\n [0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])\nassert np.allclose(\n get_cumulative_rewards([0, 0, 1, 2, 3, 4, 0], gamma=0),\n [0, 0, 1, 2, 3, 4, 0])\nprint(\"looks good!\")", "_____no_output_____" ] ], [ [ "#### Loss function and updates\n\nWe now need to define objective and update over policy gradient.\n\nOur objective function is\n\n$$ J \\approx { 1 \\over N } \\sum_{s_i,a_i} G(s_i,a_i) $$\n\nREINFORCE defines a way to compute the gradient of the expected reward with respect to policy parameters. The formula is as follows:\n\n$$ \\nabla_\\theta \\hat J(\\theta) \\approx { 1 \\over N } \\sum_{s_i, a_i} \\nabla_\\theta \\log \\pi_\\theta (a_i \\mid s_i) \\cdot G_t(s_i, a_i) $$\n\nWe can abuse PyTorch's capabilities for automatic differentiation by defining our objective function as follows:\n\n$$ \\hat J(\\theta) \\approx { 1 \\over N } \\sum_{s_i, a_i} \\log \\pi_\\theta (a_i \\mid s_i) \\cdot G_t(s_i, a_i) $$\n\nWhen you compute the gradient of that function with respect to network weights $\\theta$, it will become exactly the policy gradient.", "_____no_output_____" ] ], [ [ "def to_one_hot(y_tensor, ndims):\n \"\"\" helper: take an integer vector and convert it to 1-hot matrix. \"\"\"\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n y_one_hot = torch.zeros(\n y_tensor.size()[0], ndims).scatter_(1, y_tensor, 1)\n return y_one_hot", "_____no_output_____" ], [ "# Your code: define optimizers\noptimizer = torch.optim.Adam(model.parameters(), 1e-3)\n\n\ndef train_on_session(states, actions, rewards, gamma=0.99, entropy_coef=1e-2):\n \"\"\"\n Takes a sequence of states, actions and rewards produced by generate_session.\n Updates agent's weights by following the policy gradient above.\n Please use Adam optimizer with default parameters.\n \"\"\"\n\n # cast everything into torch tensors\n states = torch.tensor(states, dtype=torch.float32)\n actions = torch.tensor(actions, dtype=torch.int32)\n cumulative_returns = np.array(get_cumulative_rewards(rewards, gamma))\n cumulative_returns = torch.tensor(cumulative_returns, dtype=torch.float32)\n\n # predict logits, probas and log-probas using an agent.\n logits = model(states)\n probs = nn.functional.softmax(logits, -1)\n log_probs = nn.functional.log_softmax(logits, -1)\n\n assert all(isinstance(v, torch.Tensor) for v in [logits, probs, log_probs]), \\\n \"please use compute using torch tensors and don't use predict_probs function\"\n\n # select log-probabilities for chosen actions, log pi(a_i|s_i)\n log_probs_for_actions = torch.sum(\n log_probs * to_one_hot(actions, env.action_space.n), dim=1)\n \n # Compute loss here. 
Don't forgen entropy regularization with `entropy_coef` \n entropy = <YOUR CODE>\n loss = <YOUR CODE>\n\n # Gradient descent step\n <YOUR CODE>\n\n # technical: return session rewards to print them later\n return np.sum(rewards)", "_____no_output_____" ] ], [ [ "### The actual training", "_____no_output_____" ] ], [ [ "for i in range(100):\n rewards = [train_on_session(*generate_session(env)) for _ in range(100)] # generate new sessions\n \n print(\"mean reward:%.3f\" % (np.mean(rewards)))\n \n if np.mean(rewards) > 500:\n print(\"You Win!\") # but you can train even further\n break", "_____no_output_____" ] ], [ [ "### Results & video", "_____no_output_____" ] ], [ [ "# Record sessions\n\nimport gym.wrappers\n\nwith gym.wrappers.Monitor(gym.make(\"CartPole-v0\"), directory=\"videos\", force=True) as env_monitor:\n sessions = [generate_session(env_monitor) for _ in range(100)]", "_____no_output_____" ], [ "# Show video. This may not work in some setups. If it doesn't\n# work for you, you can download the videos and view them locally.\n\nfrom pathlib import Path\nfrom base64 import b64encode\nfrom IPython.display import HTML\n\nvideo_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])\nvideo_path = video_paths[-1] # You can also try other indices\n\nif 'google.colab' in sys.modules:\n # https://stackoverflow.com/a/57378660/1214547\n with video_path.open('rb') as fp:\n mp4 = fp.read()\n data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()\nelse:\n data_url = str(video_path)\n\nHTML(\"\"\"\n<video width=\"640\" height=\"480\" controls>\n <source src=\"{}\" type=\"video/mp4\">\n</video>\n\"\"\".format(data_url))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d056ace8ccce4b5977f0a667427cdfcca8fd60db
410,278
ipynb
Jupyter Notebook
code/notebooks/synthetic_tests/model_multibody_shallow-seated/airborne_EQL_magdirection_RM_analysis.ipynb
pinga-lab/eqlayer-magnetization-direction
dd929120b22bbd8d638c8bc5924d15f41831dce2
[ "BSD-3-Clause" ]
3
2020-09-03T03:00:06.000Z
2021-11-10T10:33:08.000Z
code/notebooks/synthetic_tests/model_multibody_shallow-seated/.ipynb_checkpoints/airborne_EQL_magdirection_RM_analysis-checkpoint.ipynb
pinga-lab/eqlayer-magnetization-direction
dd929120b22bbd8d638c8bc5924d15f41831dce2
[ "BSD-3-Clause" ]
null
null
null
code/notebooks/synthetic_tests/model_multibody_shallow-seated/.ipynb_checkpoints/airborne_EQL_magdirection_RM_analysis-checkpoint.ipynb
pinga-lab/eqlayer-magnetization-direction
dd929120b22bbd8d638c8bc5924d15f41831dce2
[ "BSD-3-Clause" ]
1
2022-03-17T15:32:29.000Z
2022-03-17T15:32:29.000Z
475.409038
236,872
0.941228
[ [ [ "# Equivalent layer technique for estimating total magnetization direction: Analysis of the result", "_____no_output_____" ], [ "## Importing libraries", "_____no_output_____" ] ], [ [ "% matplotlib inline ", "_____no_output_____" ], [ "import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport cPickle as pickle\nimport datetime\nimport timeit\nimport string as st\n\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom fatiando.gridder import regular", "_____no_output_____" ], [ "notebook_name = 'airborne_EQL_magdirection_RM_analysis.ipynb'", "_____no_output_____" ] ], [ [ "## Plot style", "_____no_output_____" ] ], [ [ "plt.style.use('ggplot')", "_____no_output_____" ] ], [ [ "## Importing my package", "_____no_output_____" ] ], [ [ "dir_modules = '../../../mypackage'\nsys.path.append(dir_modules)", "_____no_output_____" ], [ "import auxiliary_functions as fc", "_____no_output_____" ] ], [ [ "## Loading model", "_____no_output_____" ] ], [ [ "with open('data/model_multi.pickle') as f:\n model_multi = pickle.load(f)", "_____no_output_____" ] ], [ [ "## Loading observation points", "_____no_output_____" ] ], [ [ "with open('data/airborne_survey.pickle') as f:\n airborne = pickle.load(f)", "_____no_output_____" ] ], [ [ "## Loading data set", "_____no_output_____" ] ], [ [ "with open('data/data_set.pickle') as f:\n data = pickle.load(f)", "_____no_output_____" ] ], [ [ "## Loading results", "_____no_output_____" ] ], [ [ "with open('data/result_RM_airb.pickle') as f:\n results = pickle.load(f)", "_____no_output_____" ] ], [ [ "## List of saved files", "_____no_output_____" ] ], [ [ "saved_files = []", "_____no_output_____" ] ], [ [ "## Observation area", "_____no_output_____" ] ], [ [ "print 'Area limits: \\n x_max = %.1f m \\n x_min = %.1f m \\n y_max = %.1f m \\n y_min = %.1f m' % (airborne['area'][1], \n airborne['area'][0],\n airborne['area'][3],\n airborne['area'][2])", "Area limits: \n x_max = 5500.0 m \n x_min = -6500.0 m \n y_max = 6500.0 m \n y_min = -5500.0 m\n" ] ], [ [ "## Airborne survey information", "_____no_output_____" ] ], [ [ "print 'Shape : (%.0f,%.0f)'% airborne['shape'] \nprint 'Number of data: %.1f' % airborne['N']\nprint 'dx: %.1f m' % airborne['dx']\nprint 'dy: %.1f m ' % airborne['dy']", "Shape : (49,25)\nNumber of data: 1225.0\ndx: 250.0 m\ndy: 500.0 m \n" ] ], [ [ "## Properties of the model", "_____no_output_____" ], [ "### Main field", "_____no_output_____" ] ], [ [ "inc_gf,dec_gf = model_multi['main_field']", "_____no_output_____" ], [ "print'Main field inclination: %.1f degree' % inc_gf\nprint'Main field declination: %.1f degree' % dec_gf", "Main field inclination: -40.0 degree\nMain field declination: -22.0 degree\n" ] ], [ [ "### Magnetization direction", "_____no_output_____" ] ], [ [ "print 'Inclination: %.1f degree' % model_multi['inc_R']\nprint 'Declination: %.1f degree' % model_multi['dec_R']", "Inclination: -25.0 degree\nDeclination: 30.0 degree\n" ], [ "inc_R,dec_R = model_multi['inc_R'],model_multi['dec_R']", "_____no_output_____" ] ], [ [ "### Coordinates equivalent sources", "_____no_output_____" ] ], [ [ "h = results['layer_depth']", "_____no_output_____" ], [ "shape_layer = (airborne['shape'][0],airborne['shape'][1])", "_____no_output_____" ], [ "xs,ys,zs = regular(airborne['area'],shape_layer,h)", "_____no_output_____" ] ], [ [ "## The best solution using L-curve", "_____no_output_____" ] ], [ [ "m_LM = results['magnetic_moment'][4]\ninc_est = results['inc_est'][4]\ndec_est = 
results['dec_est'][4]", "_____no_output_____" ], [ "mu = results['reg_parameter'][4]", "_____no_output_____" ], [ "phi = results['phi'][4]", "_____no_output_____" ], [ "print mu", "350000.0\n" ] ], [ [ "## Visualization of the convergence", "_____no_output_____" ] ], [ [ "phi = (np.array(phi)/airborne['x'].size)", "_____no_output_____" ], [ "title_font = 22\nbottom_font = 20\nsaturation_factor = 1.\nplt.close('all')\nplt.figure(figsize=(10,10), tight_layout=True)\n\nplt.plot(phi,'b-',linewidth=1.5)\nplt.title('Convergence', fontsize=title_font)\nplt.xlabel('iteration', fontsize = title_font)\nplt.ylabel('Goal function ', fontsize = title_font)\nplt.tick_params(axis='both', which='major', labelsize=15)\n\nfile_name = 'figs/airborne/convergence_LM_NNLS_magRM'\nplt.savefig(file_name+'.png',dpi=300)\nsaved_files.append(file_name+'.png')\n\nplt.show()", "/home/andrelreis/anaconda3/envs/py2/lib/python2.7/site-packages/matplotlib/figure.py:2299: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n warnings.warn(\"This figure includes Axes that are not compatible \"\n" ] ], [ [ "## Estimated magnetization direction", "_____no_output_____" ] ], [ [ "print (inc_est,dec_est)", "(-28.718141852418594, 31.83100936371733)\n" ], [ "print (inc_R,dec_R)", "(-25.0, 30.0)\n" ] ], [ [ "## Comparison between observed data and predicted data", "_____no_output_____" ] ], [ [ "pred = fc.tfa_layer(airborne['x'],airborne['y'],airborne['z'],\n xs,ys,zs,inc_gf,dec_gf,m_LM,inc_est,dec_est)", "_____no_output_____" ], [ "res = pred - data['tfa_obs_RM_airb']", "_____no_output_____" ], [ "r_norm,r_mean,r_std = fc.residual(data['tfa_obs_RM_airb'],pred)", "_____no_output_____" ], [ "title_font = 22\nbottom_font = 20\nplt.figure(figsize=(28,11), tight_layout=True)\n\nranges = np.abs([data['tfa_obs_RM_airb'].max(),\n data['tfa_obs_RM_airb'].min(),\n pred.max(), pred.min()]).max()\n\nranges_r = np.abs([res.max(),res.min()]).max()\n\n## Observed data plot\nax1=plt.subplot(1,4,1)\n\nplt.title('Observed data', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']), \n data['tfa_obs_RM_airb'].reshape(airborne['shape']),\n 30, cmap='viridis',vmin=-ranges, vmax=ranges)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\ncb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)\ncb.set_label('nT',size=bottom_font)\ncb.ax.tick_params(labelsize=bottom_font)\n\n## Predicted data plot\nax2=plt.subplot(1,4,2)\n\nplt.title('Predicted data', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']),\n pred.reshape(airborne['shape']),\n 30, cmap='viridis', vmin=-ranges, vmax=ranges)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\ncb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)\ncb.set_label('nT',size=bottom_font)\ncb.ax.tick_params(labelsize=bottom_font)\n\n## Residuals plot and histogram\nax3=plt.subplot(1,4,3)\n\nplt.title('Residuals map', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']),\n res.reshape(airborne['shape']),\n 30, cmap='viridis', vmin=-ranges_r, 
vmax=ranges_r)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\ncb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)\ncb.set_label('nT',size=bottom_font)\ncb.ax.tick_params(labelsize=bottom_font)\n\nax4=plt.subplot(1,4,4)\nplt.title('Histogram of residuals', fontsize =title_font)\nplt.xlabel('Residuals (nT)', fontsize = title_font)\nplt.ylabel('Frequency', fontsize = title_font)\nplt.text(0.02, 0.97, \"mean = {:.2f}\\nstd = {:.2f} \".format(np.mean(res), np.std(res)),\n horizontalalignment='left',\n verticalalignment='top',\n transform = ax4.transAxes, fontsize=bottom_font)\nn, bins, patches = plt.hist(res,bins=30, normed=True, facecolor='black')\ngauss = mlab.normpdf(bins, 0., 10.)\nplt.plot(bins, gauss, 'r-', linewidth=4.)\nax4.set_xticks([-100.0,-50.,0.0,50.,100.0])\nax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\n\n\n## \nfile_name = 'figs/airborne/data_fitting_LM_NNLS_magRM'\nplt.savefig(file_name+'.png',dpi=300)\nsaved_files.append(file_name+'.png')\n\nplt.show()", "/home/andrelreis/anaconda3/envs/py2/lib/python2.7/site-packages/matplotlib/axes/_axes.py:6571: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n/home/andrelreis/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:65: MatplotlibDeprecationWarning: scipy.stats.norm.pdf\n" ] ], [ [ "## Positive magnetic-moment distribution", "_____no_output_____" ] ], [ [ "title_font = 22\nbottom_font = 20\nplt.close('all')\nplt.figure(figsize=(10,10), tight_layout=True)\n\nplt.title('Magnetic moment distribution', fontsize=title_font)\nplt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer), \n m_LM.reshape(shape_layer), 40, cmap='inferno')\ncb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)\ncb.set_label('$A.m^2$',size=bottom_font)\ncb.ax.tick_params(labelsize=bottom_font)\nplt.xlabel('y (km)', fontsize = title_font)\nplt.ylabel('x (km)', fontsize = title_font)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\n\nfile_name = 'figs/airborne/magnetic_moment_positive_LM_NNLS_magRM'\nplt.savefig(file_name+'.png',dpi=300)\nsaved_files.append(file_name+'.png')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Figure for paper", "_____no_output_____" ] ], [ [ "#title_font = 17\ntitle_font = 5\n#bottom_font = 14\nbottom_font = 4\nhist_font = 5\n\nheight_per_width = 17./15.\nplt.figure(figsize=(4.33,4.33*height_per_width), tight_layout=True)\n\nranges = np.abs([data['tfa_obs_RM_airb'].max(),\n data['tfa_obs_RM_airb'].min(),\n pred.max(), pred.min()]).max()\n\nranges_r = np.abs([res.max(),res.min()]).max()\n\n## Observed data plot\nax1=plt.subplot(3,2,1)\n\nplt.title('(a) Observed data', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']), \n data['tfa_obs_RM_airb'].reshape(airborne['shape']),\n 30, cmap='viridis',vmin=-ranges, vmax=ranges)\ncbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)\ncbar.set_label('nT',size=title_font)\ncbar.ax.tick_params(labelsize=bottom_font)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(a) Observed data', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\n\n## Predicted data 
plot\nax2=plt.subplot(3,2,2)\n\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']),\n pred.reshape(airborne['shape']),\n 30, cmap='viridis', vmin=-ranges, vmax=ranges)\ncbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)\ncbar.set_label('nT',size=title_font)\ncbar.ax.tick_params(labelsize=bottom_font)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(b) Predicted data', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\n\n## Residuals plot and histogram\nax3=plt.subplot(3,2,3)\n\nplt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),\n 1e-3*airborne['x'].reshape(airborne['shape']),\n res.reshape(airborne['shape']),\n 30, cmap='viridis', vmin=-ranges_r, vmax=ranges_r)\ncbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)\ncbar.set_label('nT',size=title_font)\ncbar.ax.tick_params(labelsize=bottom_font)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(c) Residuals', fontsize=title_font)\nplt.xlabel('y (km)',fontsize = title_font)\nplt.ylabel('x (km)',fontsize = title_font)\n\n\nax4= plt.subplot(3,2,4)\nplt.text(0.02, 0.97, \"mean = {:.2f}\\nstd = {:.2f} \".format(np.mean(res), np.std(res)),\n horizontalalignment='left',\n verticalalignment='top',\n transform = ax4.transAxes, fontsize=hist_font)\nn, bins, patches = plt.hist(res,bins=20, normed=True, facecolor='black')\ngauss = mlab.normpdf(bins, 0., 10.)\nplt.plot(bins, gauss, 'r-', linewidth=1.)\nax4.set_xticks([-100.0,-50.,0.0,50.,100.0])\nax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(d) Histogram of residuals', fontsize =title_font)\nplt.xlabel('Residuals (nT)', fontsize = title_font)\nplt.ylabel('Frequency', fontsize = title_font)\n\nax5= plt.subplot(3,2,5)\nplt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer), \n m_LM.reshape(shape_layer)*1e-9, 30, cmap='inferno')\ncbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)\ncbar.set_label('$10^{9}$ A$\\cdot$m$^2$',size=title_font)\ncbar.ax.tick_params(labelsize=bottom_font)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(e) Magnetic moment distribution', fontsize=title_font)\nplt.xlabel('y (km)', fontsize = title_font)\nplt.ylabel('x (km)', fontsize = title_font)\n\nax6= plt.subplot(3,2,6)\nplt.plot(phi, 'b-',linewidth=1.0)\nplt.tick_params(axis='both', which='major', labelsize=bottom_font)\nplt.title('(f) Convergence', fontsize=title_font)\nplt.xlabel('iteration', fontsize = title_font)\nplt.ylabel('Goal function ', fontsize = title_font)\n\n########################################################################### \n#file_name = 'figs/airborne/results_compiled_LM_NNLS_magRM'\nfile_name = 'figs/airborne/Fig3'\nplt.savefig(file_name+'.png',dpi=1200)\nsaved_files.append(file_name+'.png')\n\nplt.savefig(file_name+'.eps',dpi=1200)\nsaved_files.append(file_name+'.eps')\n\n\nplt.show()", "/home/andrelreis/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:71: MatplotlibDeprecationWarning: scipy.stats.norm.pdf\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d056b4f40c1feea1bb61ce4625ea7ba6f62eed51
136,719
ipynb
Jupyter Notebook
notebooks/Parsing Natural Language in Python.ipynb
peey/python-tutorial-notebooks
915b7c99eef17e5f54507f3b1cdcc33006ec3acf
[ "Apache-2.0" ]
null
null
null
notebooks/Parsing Natural Language in Python.ipynb
peey/python-tutorial-notebooks
915b7c99eef17e5f54507f3b1cdcc33006ec3acf
[ "Apache-2.0" ]
null
null
null
notebooks/Parsing Natural Language in Python.ipynb
peey/python-tutorial-notebooks
915b7c99eef17e5f54507f3b1cdcc33006ec3acf
[ "Apache-2.0" ]
null
null
null
248.58
1,857
0.682429
[ [ [ "# Parsing Natural Language in Python", "_____no_output_____" ], [ "**(C) 2018 by [Damir Cavar](http://damir.cavar.me/)**", "_____no_output_____" ], [ "**License:** [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))", "_____no_output_____" ], [ "This is a tutorial related to the discussion of parsing with Probabilistic Context Free Grammars (PCFG) in the class *Advanced Natural Language Processing* taught at Indiana University in Fall 2018.", "_____no_output_____" ], [ "This code and tutorial is based on different summer school courses that I taught and tutorials that I gave at different occasions in Europe and the US. This particular example will use code from the **TDAParser.py** and other scripts developed since 2002. Most of this material was used in general introduction courses to algorithms in Natural Language Processing that I taught at Indiana University, University of Konstanz, University of Zadar, University of Nova Gorica.", "_____no_output_____" ] ], [ [ "import sys", "_____no_output_____" ] ], [ [ "## The Grammar Class", "_____no_output_____" ], [ "Let us assume that our Phrase Structure Grammar consists of rules that contain one symbol in the Left-Hand Side, followed by a production symbol, an arrow, and by a list of at least one terminal and symbol. Comments can be introduced using the *#* symbol. Every rule has to be contained in one line.", "_____no_output_____" ] ], [ [ "grammarText = \"\"\"\n# PSG1\n# small English grammar\n# (C) 2005 by Damir Cavar, Indiana University\n\n# Grammar:\nS -> NP VP\n\nNP -> N\nNP -> Adj N\nNP -> Art Adj N\nNP -> Art N\nNP -> Art N PP\n#NP -> Art N NP\n\nVP -> V\nVP -> V NP\nVP -> Adv V NP\nVP -> V PP\nVP -> V NP PP\n\nPP -> P NP\n\n\n# Lexicon:\nN -> John\nN -> Mary\nN -> bench\nN -> cat\nN -> mouse\n\nArt -> the\nArt -> a\n\nAdj -> green\nAdj -> yellow\nAdj -> big\nAdj -> small\n\nAdv -> often\nAdv -> yesterday\n\nV -> kissed\nV -> loves\nV -> sees\nV -> meets\nV -> chases\n\nP -> on\nP -> in\nP -> beside\nP -> under\n\"\"\"", "_____no_output_____" ] ], [ [ "We can parse this grammar into a representation that allows us to fetch the left- and the right-hand side of a rule for top- or bottom-up parsing.", "_____no_output_____" ] ], [ [ "class PSG:\n def __init__(self, grammar):\n self.LHS = {}\n self.RHS = {}\n self.__read__(grammar)\n\n def __str__(self):\n text = \"\"\n for i in self.LHS.keys():\n if len(text) > 0:\n text += \"\\n\"\n for x in self.LHS[i]:\n text += i + \" -> \" + \" \".join(x) + \"\\n\"\n return text\n\n def __read__(self, g):\n for i in g.split(\"\\n\"):\n i = i.split(\"#\")[0].strip() # cut off comment string and strip\n if len(i) == 0: continue\n tokens = i.split(\"->\")\n if len(tokens) != 2: continue\n lhs = tokens[0].split()\n if len(lhs) != 1: continue\n rhs = tuple(tokens[1].split())\n value = self.LHS.get(lhs[0], [])\n if rhs not in value: value.append(rhs)\n self.LHS[lhs[0]] = value\n value = self.RHS.get(rhs, [])\n if lhs[0] not in value: value.append(lhs[0])\n self.RHS[rhs] = value\n\n def getRHS(self, left):\n return self.LHS.get(left, [])\n\n def getLHS(self, right):\n return self.RHS.get(right, [])\n", "_____no_output_____" ] ], [ [ "The grammar file:", "_____no_output_____" ], [ "## The Top-Down Parser", "_____no_output_____" ], [ "Defining some parameters:", "_____no_output_____" ] ], [ [ "LIFO = -1\nFIFO = 0\nstrategy = FIFO", "_____no_output_____" ], [ "def 
tdparse(inp, goal, grammar, agenda):\n print(\"Got : %s\\tinput: %s\" % (goal, inp))\n if goal == inp == []: print(\"Success\")\n elif goal == [] or inp == []:\n if agenda == []: print(\"Fail: Agenda empty!\")\n else:\n entry = agenda.pop(strategy)\n print(\"Backing up to: %s with %s\" % (entry[0], entry[1]))\n tdparse(entry[1], entry[0], grammar, agenda)\n else: # there is something in goal and input\n if goal[0] == inp[0]: # if initial symbols match, reduce lists, parse\n tdparse(inp[1:], goal[1:], grammar, agenda)\n else:\n for i in grammar.LHS.get(goal[0], []):\n if [list(i) + goal[1:], inp] not in agenda:\n agenda.append([list(i) + goal[1:], inp])\n if len(agenda) > 0:\n entry = agenda.pop(strategy)\n tdparse(entry[1], entry[0], grammar, agenda)\n else: print(\"Fail: Agenda empty!\")", "_____no_output_____" ], [ "myGrammar = PSG(grammarText)\nprint(myGrammar)\ntdparse( ('John', 'loves', 'Mary') , [\"S\"], myGrammar, [])", "S -> NP VP\n\nNP -> N\nNP -> Adj N\nNP -> Art Adj N\nNP -> Art N\nNP -> Art N PP\n\nVP -> V\nVP -> V NP\nVP -> Adv V NP\nVP -> V PP\nVP -> V NP PP\n\nPP -> P NP\n\nN -> John\nN -> Mary\nN -> bench\nN -> cat\nN -> mouse\n\nArt -> the\nArt -> a\n\nAdj -> green\nAdj -> yellow\nAdj -> big\nAdj -> small\n\nAdv -> often\nAdv -> yesterday\n\nV -> kissed\nV -> loves\nV -> sees\nV -> meets\nV -> chases\n\nP -> on\nP -> in\nP -> beside\nP -> under\n\nGot : ['S']\tinput: ('John', 'loves', 'Mary')\nGot : ['NP', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['Adj', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['Art', 'Adj', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['Art', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['Art', 'N', 'PP', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['John', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['VP']\tinput: ('loves', 'Mary')\nGot : ['Mary', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['bench', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['cat', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['mouse', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['green', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['yellow', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['big', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['small', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['the', 'Adj', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['a', 'Adj', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['the', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['a', 'N', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['the', 'N', 'PP', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['a', 'N', 'PP', 'VP']\tinput: ('John', 'loves', 'Mary')\nGot : ['V']\tinput: ('loves', 'Mary')\nGot : ['V', 'NP']\tinput: ('loves', 'Mary')\nGot : ['Adv', 'V', 'NP']\tinput: ('loves', 'Mary')\nGot : ['V', 'PP']\tinput: ('loves', 'Mary')\nGot : ['V', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['kissed']\tinput: ('loves', 'Mary')\nGot : ['loves']\tinput: ('loves', 'Mary')\nGot : []\tinput: ('Mary',)\nBacking up to: ['sees'] with ('loves', 'Mary')\nGot : ['sees']\tinput: ('loves', 'Mary')\nGot : ['meets']\tinput: ('loves', 'Mary')\nGot : ['chases']\tinput: ('loves', 'Mary')\nGot : ['kissed', 'NP']\tinput: ('loves', 'Mary')\nGot : ['loves', 'NP']\tinput: ('loves', 'Mary')\nGot : ['NP']\tinput: ('Mary',)\nGot : ['sees', 'NP']\tinput: ('loves', 'Mary')\nGot : ['meets', 'NP']\tinput: ('loves', 'Mary')\nGot : ['chases', 'NP']\tinput: ('loves', 'Mary')\nGot : 
['often', 'V', 'NP']\tinput: ('loves', 'Mary')\nGot : ['yesterday', 'V', 'NP']\tinput: ('loves', 'Mary')\nGot : ['kissed', 'PP']\tinput: ('loves', 'Mary')\nGot : ['loves', 'PP']\tinput: ('loves', 'Mary')\nGot : ['PP']\tinput: ('Mary',)\nGot : ['sees', 'PP']\tinput: ('loves', 'Mary')\nGot : ['meets', 'PP']\tinput: ('loves', 'Mary')\nGot : ['chases', 'PP']\tinput: ('loves', 'Mary')\nGot : ['kissed', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['loves', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['NP', 'PP']\tinput: ('Mary',)\nGot : ['sees', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['meets', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['chases', 'NP', 'PP']\tinput: ('loves', 'Mary')\nGot : ['N']\tinput: ('Mary',)\nGot : ['Adj', 'N']\tinput: ('Mary',)\nGot : ['Art', 'Adj', 'N']\tinput: ('Mary',)\nGot : ['Art', 'N']\tinput: ('Mary',)\nGot : ['Art', 'N', 'PP']\tinput: ('Mary',)\nGot : ['P', 'NP']\tinput: ('Mary',)\nGot : ['N', 'PP']\tinput: ('Mary',)\nGot : ['Adj', 'N', 'PP']\tinput: ('Mary',)\nGot : ['Art', 'Adj', 'N', 'PP']\tinput: ('Mary',)\nGot : ['Art', 'N', 'PP', 'PP']\tinput: ('Mary',)\nGot : ['John']\tinput: ('Mary',)\nGot : ['Mary']\tinput: ('Mary',)\nGot : []\tinput: ()\nBacking up to: ['bench'] with ('Mary',)\nGot : ['bench']\tinput: ('Mary',)\nGot : ['cat']\tinput: ('Mary',)\nGot : ['mouse']\tinput: ('Mary',)\nGot : ['green', 'N']\tinput: ('Mary',)\nGot : ['yellow', 'N']\tinput: ('Mary',)\nGot : ['big', 'N']\tinput: ('Mary',)\nGot : ['small', 'N']\tinput: ('Mary',)\nGot : ['the', 'Adj', 'N']\tinput: ('Mary',)\nGot : ['a', 'Adj', 'N']\tinput: ('Mary',)\nGot : ['the', 'N']\tinput: ('Mary',)\nGot : ['a', 'N']\tinput: ('Mary',)\nGot : ['the', 'N', 'PP']\tinput: ('Mary',)\nGot : ['a', 'N', 'PP']\tinput: ('Mary',)\nGot : ['on', 'NP']\tinput: ('Mary',)\nGot : ['in', 'NP']\tinput: ('Mary',)\nGot : ['beside', 'NP']\tinput: ('Mary',)\nGot : ['under', 'NP']\tinput: ('Mary',)\nGot : ['John', 'PP']\tinput: ('Mary',)\nGot : ['Mary', 'PP']\tinput: ('Mary',)\nGot : ['PP']\tinput: ()\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ] ]
d056b85a84fcccee47f4f13e3aa60bec6927bc50
774,894
ipynb
Jupyter Notebook
Visualization/Plotly 4_in_class.ipynb
AKuHAK/ValRCS-LU_PySem_2020_2
e5ab22765993852bdebeb121f8a0f4c1f0aac21e
[ "MIT" ]
null
null
null
Visualization/Plotly 4_in_class.ipynb
AKuHAK/ValRCS-LU_PySem_2020_2
e5ab22765993852bdebeb121f8a0f4c1f0aac21e
[ "MIT" ]
null
null
null
Visualization/Plotly 4_in_class.ipynb
AKuHAK/ValRCS-LU_PySem_2020_2
e5ab22765993852bdebeb121f8a0f4c1f0aac21e
[ "MIT" ]
1
2021-05-07T09:20:55.000Z
2021-05-07T09:20:55.000Z
32.691811
25,223
0.385431
[ [ [ "# to get a version that is at least we can use >=\n# pip install plotly>=4.0.0\n!pip install plotly>=4.1.1", "_____no_output_____" ], [ "!pip uninstall cufflinks", "_____no_output_____" ], [ "import plotly", "_____no_output_____" ], [ "plotly.__version__", "_____no_output_____" ], [ "# !pip install \"notebook>=5.3\" \"ipywidgets>=7.2\"", "_____no_output_____" ], [ "import plotly.graph_objects as go\nfig = go.Figure(data=go.Bar(x=[-3,-2,-1,0,1], y=[2, 3, 1, 5.3, -1], marker_color='rgb(226, 118, 155)'))\nfig.show()", "_____no_output_____" ], [ "# https://plot.ly/python/line-and-scatter/\nimport plotly.express as px\niris = px.data.iris()\nfig = px.scatter(iris, x=\"sepal_width\", y=\"sepal_length\")\nfig.show()", "_____no_output_____" ], [ "iris.head()", "_____no_output_____" ], [ "iris.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 6 columns):\nsepal_length 150 non-null float64\nsepal_width 150 non-null float64\npetal_length 150 non-null float64\npetal_width 150 non-null float64\nspecies 150 non-null object\nspecies_id 150 non-null int64\ndtypes: float64(4), int64(1), object(1)\nmemory usage: 7.1+ KB\n" ], [ "# https://plot.ly/python/line-and-scatter/\nimport plotly.express as px\niris = px.data.iris()\nfig = px.scatter(iris, x=\"sepal_width\", y=\"sepal_length\", color=\"species\")\nfig.show()", "_____no_output_____" ], [ "# https://plot.ly/python/line-and-scatter/\nimport plotly.express as px\niris = px.data.iris()\nfig = px.scatter(iris, x=\"sepal_width\", \n y=\"sepal_length\", \n size=\"petal_length\", \n color=\"species\",\n hover_data=['petal_width', 'species_id'] \n )\nfig.show()", "_____no_output_____" ], [ "# Line plot with plotly express\nimport plotly.express as px\ngapminder = px.data.gapminder().query(\"continent == 'Oceania'\")\nfig = px.line(gapminder, x='year', y='lifeExp', color='country')\nfig.show()", "_____no_output_____" ], [ "# Line plot with plotly express\nimport plotly.express as px\ngapminder = px.data.gapminder().query(\"continent == 'Oceania'\")\nfig = px.line(gapminder, x='year', y='lifeExp', color='country')\nfig.show()", "_____no_output_____" ], [ "# Line plot with plotly express\nimport plotly.express as px\ngapminder = px.data.gapminder().query(\"continent == 'Europe'\")\nfig = px.line(gapminder, x='year', y='lifeExp', color='country')\nfig.show()", "_____no_output_____" ], [ "gapminder.head()", "_____no_output_____" ], [ "allcountries = px.data.gapminder()\nallcountries.head()", "_____no_output_____" ], [ "finland = allcountries[allcountries['country'] == 'Finland']\nfinland.tail()", "_____no_output_____" ], [ "# https://stackoverflow.com/questions/12096252/use-a-list-of-values-to-select-rows-from-a-pandas-dataframe\nnordic = allcountries[allcountries['country'].isin(['Finland', 'Norway', 'Denmark', 'Sweden'])]\nnordic.head()", "_____no_output_____" ], [ "fig = px.line(nordic, x='year', y='pop', color='country')\nfig.show()", "_____no_output_____" ], [ "fig = px.line(nordic, x='year', y='lifeExp', color='country')\nfig.show()", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "N = 1000\nt = np.linspace(0, 10, 101)\ny = np.sin(t)", "_____no_output_____" ], [ "t2 = np.arange(0, 10.01, 0.1)\nt2", "_____no_output_____" ], [ "t", "_____no_output_____" ], [ "fig = go.Figure(data=go.Scatter(x=t, y=y, mode='markers'))\nfig.show()", "_____no_output_____" ], [ "N = 100\nrandom_x = np.linspace(0, 1, N)\nrandom_y0 = np.random.randn(N) + 5\nrandom_y1 = 
np.random.randn(N)\nrandom_y2 = np.random.randn(N) - 5\n\nfig = go.Figure()\n\n# Add traces\nfig.add_trace(go.Scatter(x=random_x, y=random_y0,\n mode='markers',\n name='markers'))\nfig.add_trace(go.Scatter(x=random_x, y=random_y1,\n mode='lines+markers',\n name='lines+markers'))\n# fig.add_trace(go.Scatter(x=random_x, y=random_y2,\n# mode='lines',\n# name='lines'))\n\nfig.add_trace(go.Bar(x=random_x, y=random_y2))\n\nfig.show()", "_____no_output_____" ], [ "N = 100\nrandom_x = np.linspace(0, 1, N)\nrandom_y0 = np.random.randn(N) + 5\nrandom_y1 = np.random.randn(N)\nrandom_y2 = np.random.randn(N) - 5\nrandom_y3 = np.random.randn(N) + 10\n\nfig = go.Figure()\n\n# Add traces\nfig.add_trace(go.Scatter(x=random_x, y=random_y0,\n mode='markers',\n name='markers'))\nfig.add_trace(go.Scatter(x=random_x, y=random_y1,\n mode='lines+markers',\n name='lines+markers'))\nfig.add_trace(go.Scatter(x=random_x, y=random_y2,\n mode='lines',\n name='lines'))\n\n# using add_trace we can add different type of charts on the same plot figure\n# https://plot.ly/python/bar-charts/\nfig.add_trace(go.Bar(x=random_x, y=random_y3))\n\nfig.show()", "_____no_output_____" ], [ "fig = px.bar(finland, x='year', y='lifeExp',\n hover_data=['lifeExp', 'gdpPercap'], color='gdpPercap',\n labels={'pop':'population of Finland'}, height=400)\nfig.show()", "_____no_output_____" ], [ "nordic.columns", "_____no_output_____" ], [ "fig = px.bar(nordic, x='year', y='lifeExp',\n hover_data=['lifeExp', 'gdpPercap'], color='country',\n labels={'pop':'population of Nordic Countries'}, height=400, barmode=\"group\")\nfig.update_layout(barmode='group', xaxis_tickangle=-45)\nfig.show()", "_____no_output_____" ], [ "# so default for bar is stacked bar , if we need grouped bars we specify it above\nfig = px.bar(nordic, x='year', y='lifeExp',\n hover_data=['lifeExp', 'gdpPercap'], color='country',\n labels={'pop':'population of Nordic Countries'}, height=400)\nfig.update_layout(xaxis_tickangle=-45)\nfig.show()", "_____no_output_____" ], [ "labels = ['Oxygen','Hydrogen','Carbon_Dioxide','Nitrogen']\nvalues = [4500, 2500, 1053, 500]\n\nfig = go.Figure(data=[go.Pie(labels=labels, values=values)])\n\ncolors = ['rgb(127,0,127)', 'mediumturquoise', 'darkorange', 'lightgreen']\nfig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20,\n marker=dict(colors=colors, line=dict(color='#000000', width=2)))\nfig.update_traces(hole = 0.3)\nfig.update_traces(title = \"Elements in atmosphere\")\nfig.show()", "_____no_output_____" ], [ "labels = ['Oxygen','Hydrogen','Carbon_Dioxide','Nitrogen']\nvalues = [4500, 2500, 1053, 500]\n\nfig = go.Figure(data=[go.Pie(labels=labels, values=values)])\n\ncolors = ['rgb(127,0,127)', 'mediumturquoise', 'darkorange', 'lightgreen']\nfig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20,\n marker=dict(colors=colors, line=dict(color='#000000', width=2)))\n# fig.update_traces(hole = 0.3)\nfig.update_traces(title = \"Elements in atmosphere\")\nfig.show()", "_____no_output_____" ], [ "df = px.data.gapminder()\n\nfig = px.scatter(df.query(\"year==2007\"), x=\"gdpPercap\", y=\"lifeExp\",\n\t size=\"pop\", color=\"continent\",\n hover_name=\"country\", log_x=True, size_max=60)\nfig.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d056ba0a264b83f1fac7fb0e917c5dd2cccf47b6
29,584
ipynb
Jupyter Notebook
samples/demo.ipynb
jtchilders/Mask_RCNN
bbca2cf929f0649b787697574223c14d4763ae06
[ "MIT" ]
null
null
null
samples/demo.ipynb
jtchilders/Mask_RCNN
bbca2cf929f0649b787697574223c14d4763ae06
[ "MIT" ]
null
null
null
samples/demo.ipynb
jtchilders/Mask_RCNN
bbca2cf929f0649b787697574223c14d4763ae06
[ "MIT" ]
null
null
null
60.997938
1,759
0.593395
[ [ [ "# Mask R-CNN Demo\n\nA quick intro to using the pre-trained model to detect and segment objects.", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport cv2,time,json,glob\nfrom IPython.display import clear_output\n\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n# Import COCO config\nsys.path.append(os.path.join(\"/home/jchilders/coco/\")) # To find local version\nfrom coco import coco\nimport tensorflow as tf\nprint('tensorflow version: ',tf.__version__)\nprint('using gpu: ',tf.test.is_gpu_available())\n%matplotlib inline \n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n# Directory of images to run detection on\nIMAGE_DIR = os.path.join(ROOT_DIR, \"images\")", "tensorflow version: 1.15.0\nusing gpu: True\n" ] ], [ [ "## Configurations\n\nWe'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.\n\nFor inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.", "_____no_output_____" ] ], [ [ "class InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 10\n BATCH_SIZE=10\n \n\nconfig = InferenceConfig()\nconfig.display()", "_____no_output_____" ] ], [ [ "## Create Model and Load Trained Weights", "_____no_output_____" ] ], [ [ "# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)", "_____no_output_____" ] ], [ [ "## Class Names\n\nThe model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.\n\nTo improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). 
Keep that in mind when mapping class IDs to class names.\n\nTo get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.\n```\n# Load COCO dataset\ndataset = coco.CocoDataset()\ndataset.load_coco(COCO_DIR, \"train\")\ndataset.prepare()\n\n# Print class names\nprint(dataset.class_names)\n```\n\nWe don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.)", "_____no_output_____" ] ], [ [ "# COCO Class names\n# Index of the class in the list is its ID. For example, to get ID of\n# the teddy bear class, use: class_names.index('teddy bear')\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']", "_____no_output_____" ] ], [ [ "## Run Object Detection", "_____no_output_____" ] ], [ [ "# Load a random image from the images folder\nfile_names = next(os.walk(IMAGE_DIR))[2]\nimage = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))\n\n# Run detection\nresults = model.detect([image], verbose=1)\n\n# Visualize results\nr = results[0]\nvisualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], \n class_names, r['scores'])", "_____no_output_____" ], [ "fn = \"/home/jchilders/car_videos/10.07.17-10.07.40.mp4\"\ncap = cv2.VideoCapture(fn)\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint('frames per second: %d' % fps)\n\nframes = []\nret, frame = cap.read()\ntimestamp = [cap.get(cv2.CAP_PROP_POS_MSEC)]\nframes.append(frame)\ndata = []\nwhile(ret):\n \n if len(frames) == 10:\n results = model.detect(frames)\n for i in range(len(results)):\n r = results[i]\n rois = r['rois'].tolist()\n masks = r['masks'] * 1\n class_ids = r['class_ids']\n size = []\n position = []\n pixel_size = []\n class_name = []\n for i in range(len(rois)):\n size.append([ rois[i][2] - rois[i][0],\n rois[i][3] - rois[i][1] ])\n position.append([ rois[i][0]+int(float(size[-1][0])/2.),\n rois[i][1]+int(float(size[-1][1])/2.) 
] )\n pixel_size.append(int(masks[i].sum()))\n class_name.append(class_names[class_ids[i]])\n data.append({'size': size, \n 'position': position,\n 'pixel_size': pixel_size,\n 'timestamp': timestamp[i],\n 'rois':rois,\n 'class_ids':r['class_ids'].tolist(),\n 'class_names':class_name,\n 'scores':r['scores'].tolist()})\n# clear_output(wait=True)\n# visualize.display_instances(frames[i], r['rois'], r['masks'], r['class_ids'], \n# class_names, r['scores'])\n# print(r['rois'])\n# print(r['class_ids'])\n# print(r['scores'])\n json.dump(data,open('%s_fps%d.json' % (os.path.basename(fn),fps),'w'),indent=2, sort_keys=True)\n\n frames = []\n timestamp = []\n ret, frame = cap.read()\n timestamp.append(cap.get(cv2.CAP_PROP_POS_MSEC))\n frames.append(frame)", "_____no_output_____" ], [ "fn = \"/home/jchilders/car_videos/10.07.17-10.07.40.mp4\"\n", "_____no_output_____" ], [ "class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\ndef get_video_data(fn,model,batch_size,show_img=False):\n cap = cv2.VideoCapture(fn)\n fps = cap.get(cv2.CAP_PROP_FPS)\n print('frames per second: %d' % fps)\n\n frames = []\n ret, frame = cap.read()\n timestamp = [cap.get(cv2.CAP_PROP_POS_MSEC)]\n frames.append(frame)\n data = []\n output = {'filename:': fn, \n 'fps': fps,\n 'timestamp': str(time.ctime(os.path.getmtime(fn))),\n 'data': data}\n while(ret):\n\n if len(frames) == batch_size:\n results = model.detect(frames)\n for i in range(len(results)):\n r = results[i]\n rois = r['rois'].tolist()\n masks = r['masks'] * 1\n class_ids = r['class_ids']\n size = []\n position = []\n pixel_size = []\n class_name = []\n for i in range(len(rois)):\n size.append([ rois[i][2] - rois[i][0],\n rois[i][3] - rois[i][1] ])\n position.append([ rois[i][0]+int(float(size[-1][0])/2.),\n rois[i][1]+int(float(size[-1][1])/2.) 
] )\n pixel_size.append(int(masks[i].sum()))\n class_name.append(class_names[class_ids[i]])\n data.append({'size': size, \n 'position': position,\n 'pixel_size': pixel_size,\n 'frametime': timestamp[i],\n 'rois':rois,\n 'class_ids':r['class_ids'].tolist(),\n 'class_names':class_name,\n 'scores':r['scores'].tolist()})\n if show_img:\n clear_output(wait=True)\n vr = results[0]\n visualize.display_instances(frames[0], vr['rois'], vr['masks'], vr['class_ids'], \n class_names, vr['scores'])\n# print(r['rois'])\n# print(r['class_ids'])\n# print(r['scores'])\n# json.dump(data,open('%s_fps%d.json' % (os.path.basename(fn),fps),'w'),indent=2, sort_keys=True)\n\n frames = []\n timestamp = []\n ret, frame = cap.read()\n timestamp.append(cap.get(cv2.CAP_PROP_POS_MSEC))\n frames.append(frame)\n return output", "_____no_output_____" ], [ "batch_size = 25\nclass InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = batch_size\n BATCH_SIZE = batch_size\nconfig = InferenceConfig()\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n", "WARNING:tensorflow:From /home/jchilders/conda/mask_rcnn/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nWARNING:tensorflow:From /home/jchilders/conda/mask_rcnn/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:341: The name tf.log is deprecated. Please use tf.math.log instead.\n\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:399: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:423: calling crop_and_resize_v1 (from tensorflow.python.ops.image_ops_impl) with box_ind is deprecated and will be removed in a future version.\nInstructions for updating:\nbox_ind is deprecated, use box_indices instead\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:720: The name tf.sets.set_intersection is deprecated. Please use tf.sets.intersection instead.\n\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:722: The name tf.sparse_tensor_to_dense is deprecated. 
Please use tf.sparse.to_dense instead.\n\nWARNING:tensorflow:From /home/jchilders/git/Mask_RCNN/mrcnn/model.py:772: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.cast` instead.\n" ], [ "\nfilelist = open('/home/jchilders/car_videos/filelist.txt').readlines()\nprint('files: %d' % len(filelist))\noutput = []\nfor i,line in enumerate(filelist):\n print(' %s of %s' % (i,len(filelist)))\n fn = line.strip()\n fn_output = get_video_data(fn,model,batch_size,show_img=True)\n print(fn_output)\n clear_output(wait=True)\n output.append(fn_output)\n\njson.dump(output,open('full_data.json'))", "files: 345\n 0 of 345\nframes per second: 25\nWARNING:tensorflow:From /home/jchilders/conda/mask_rcnn/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d056ba5e1c3f4ea364e138ce943a2b5a43d0707d
158,137
ipynb
Jupyter Notebook
course/perceptron-MNIST-simulation.ipynb
cecconeurale/neunet-basics
96b0cffb93275a70a18086a5b61b90852c000a7a
[ "MIT" ]
null
null
null
course/perceptron-MNIST-simulation.ipynb
cecconeurale/neunet-basics
96b0cffb93275a70a18086a5b61b90852c000a7a
[ "MIT" ]
null
null
null
course/perceptron-MNIST-simulation.ipynb
cecconeurale/neunet-basics
96b0cffb93275a70a18086a5b61b90852c000a7a
[ "MIT" ]
null
null
null
177.085106
88,590
0.875728
[ [ [ "# The perceptron - Recognising the MNIST digits", "_____no_output_____" ], [ "<div>Table of contents</div>\n<div id=\"toc\"></div>", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom pylab import *\nfrom utils import *", "_____no_output_____" ] ], [ [ "Let us implement a perceptron that categorize the MNIST images as numbers. As you will see below the behaviour of the network is far from optimal. As we see the network [learns well the training set](#Plotting-the-results-of-test). Nevertheless its behaviour in a [test with new digits](#Spreading-of-the-network-during-test) is far from optimal. **The task we are asking the network to learn is too difficult!!**\n### Training\n#### Initializing data and parameters\nFirst we initialize the dataset (see [The MNIST dataset](http://francesco-mannella.github.io/neunet-basics/mnist.html)), the we define few parameters and initialize the main variables:", "_____no_output_____" ] ], [ [ "#-----------------------------------------------------------\n# training\n\n# Set the number of patterns \nn_patterns = 500\n\n# Take 'n_patterns' rows\nindices = arange(training_length)\nshuffle(indices)\nindices = indices[:n_patterns]\n\n# Get patterns\npatterns = array(mndata.train_images)[indices]\n\n# Rescale all patterns between 0 and 1\npatterns = sign(patterns/255.0)\n\n# Get the labels of the patterns\nlabels = array(mndata.train_labels)[indices]\n\n# Constants\n\n# Number of repetitions of \n# the pattern series\nepochs = 30\n\n# Number of trials for learning\nstime = epochs*n_patterns\n\n# Create a list of pattern indices.\n# We will reshuffle it at each \n# repetition of the series\npattern_indices = arange(n_patterns)\n\n# Learning rate\neta = 0.0001\n\n# Number of output units \n\nm = 10\n\n# the input is given\n# by a 28*28 vector)\nn = n_pixels\n\n# Variables \n\n# Init weights\nw = zeros([m, n+1])\n\n# Init input units\nx = zeros(n)\n\n# init net input\nnet = zeros(m)\n\n# Init output units\ny = zeros(m)\n\n# Init desired output vector\ny_target = zeros(m)\n\n# We will store the input, output and error history\ninput_store = zeros([n,stime])\noutput_store = zeros([m,stime])\nlabel_store = zeros([m,stime])\nsquared_errors = zeros(epochs)", "_____no_output_____" ] ], [ [ "Let us visualize the first 20 patterns of the trining set:", "_____no_output_____" ] ], [ [ "for i in xrange(20):\n # Create a new figure after each 10-th item\n if i%10 == 0:\n fig = figure(figsize = (20, 1))\n \n # Plot current item (we use the \n # function plot_img in our utils.py)\n plot_img( to_mat(patterns[i]), \n fig, (i%10)+1, windows = 20 )\n \n # show figure after all 1o items\n # are plotted\n if i%10 == 9:\n show()", "_____no_output_____" ] ], [ [ "#### Spreading of the network during training\nHere starts the core part, iterating the timesteps. We also divide the training phase in epochs. Each epoch is a single presentation of the whole input pattern series. 
The sum of squared errors will be grouped by epochs.", "_____no_output_____" ] ], [ [ "# counter of repetitions \n# of the series of patterns\nepoch = -1\n\n\n# Iterate trials\nfor t in xrange(stime) :\n \n # Reiterate the input pattern \n # sequence through timesteps\n \n # Reshuffle at the end \n # of the series\n if t%n_patterns == 0:\n shuffle(pattern_indices)\n epoch += 1\n \n # Current pattern \n k = pattern_indices[t%n_patterns]\n \n # Aggregate inputs and the bias unit\n x = hstack([ 1, patterns[k] ])\n \n # Only the unit representing the desired \n # category is set to 1\n y_target *= 0\n y_target[labels[k]] = 1\n \n # !!!! The dot product becomes a matrix \n # product with more than one output unit !!!!\n net = dot(w,x) \n \n # output function\n y = step(net)\n \n # Learning - outer product\n w += eta*outer(y_target - y, x);\n \n # Store data\n input_store[:,t] = x[1:]\n output_store[:,t] = y\n label_store[:,t] = y_target\n \n squared_errors[epoch] += 0.5*sum((y_target - y)**2)", "_____no_output_____" ] ], [ [ "#### Plotting the results of training\nWe plot the history of the squared errors through epocs a", "_____no_output_____" ] ], [ [ "fig = figure()\nax = fig.add_subplot(111)\nax.plot(squared_errors)\nxlabel(\"Epochs\")\nylabel(\"Sum of squared errors\")", "_____no_output_____" ] ], [ [ "and a visualization of the weights to each ouput unit. Each set of weights seems to reproduce (in a very raugh manner) a generalization of the target digit.", "_____no_output_____" ] ], [ [ "figure(figsize=(15,2))\nfor i in xrange(m) : \n subplot(1,m,i+1)\n title(i)\n im = to_mat(w[i,1:]) \n imshow(im, cmap=cm.bone)\n axis('off')\n\nshow()", "_____no_output_____" ] ], [ [ "### Testing\n#### Initializing data and parameters\nNow we create a new dataset to test the network and reset some variables: ", "_____no_output_____" ] ], [ [ "#-----------------------------------------------------------\n# test\n\n# Set the number of patterns \nn_patterns = 1000\n\n# Take 'n_patterns' rows\nindices = arange(test_length)\nshuffle(indices)\nindices = indices[:n_patterns]\n\n# Get patterns\npatterns = array(mndata.test_images)[indices]\n\n# Rescale all patterns between 0 and 1\npatterns = sign(patterns/255.0)\n\n# Get the labels of the patterns\nlabels = array(mndata.test_labels)[indices]\n\n# Constants\n\n# Create a list of pattern indices.\n# We will reshuffle it at each \n# repetition of the series\npattern_indices = arange(n_patterns)\nshuffle(pattern_indices)\n\n# Clear variables \n\nx *= 0\nnet *= 0\ny *= 0\n\n# We will store the input, output and error history\ninput_store = zeros([patterns.shape[1], n_patterns])\noutput_store = zeros([m, n_patterns])\ntarget_store = zeros(n_patterns)\nerror_store = zeros(n_patterns)", "_____no_output_____" ] ], [ [ "#### Spreading of the network during test\nThe network react to each test pattern in each spreading timestep:", "_____no_output_____" ] ], [ [ "# Iterate trials\nfor p in xrange(n_patterns) :\n \n # Aggregate inputs and the bias unit\n x = hstack([ 1, patterns[p] ])\n \n # !!!! 
The dot product becomes a matrix \n # product with more than one output unit !!!!\n net = dot(w,x) \n \n # output function\n y = step(net)\n \n y_index = squeeze(find(y==1))\n y_index_target = int(labels[p])\n \n error = 0\n if y_index.size < 2 :\n if y_index == y_index_target :\n error = 1 \n # store\n input_store[:,p] = x[1:]\n output_store[:,p] = y\n target_store[p] = labels[p]\n error_store[p] = error", "_____no_output_____" ] ], [ [ "Let us see what is the proportion of correct answers of the network:", "_____no_output_____" ] ], [ [ " \nprint \"Proportion of correct answers:{}\" \\\n .format(sum(error_store)/float(n_patterns))", "Proportion of correct answers:0.655\n" ] ], [ [ "#### Plotting the results of test\n\nNow we plot few test samples to get the real idea. For each sample we plot the input digit on the top, the answer of the network on the center and the target digit on the left. Squared brakets indicate that the network gave zero or more than one answer. ", "_____no_output_____" ] ], [ [ "import matplotlib.gridspec as gridspec\ngs = gridspec.GridSpec(8, 4*10)\n \nn_patterns = 20\n\nfor p in xrange(n_patterns) : \n \n im = to_mat(input_store[:,p]) \n \n k = p%10 \n \n if k==0 :\n fig = figure(figsize=(15,2))\n \n ax1 = fig.add_subplot(gs[:4,(k*4):(k*4+4)])\n ax1.imshow(im, cmap=cm.binary)\n ax1.set_axis_off()\n\n \n if error_store[p] == True :\n color = \"blue\"\n else :\n color = \"red\" \n \n y = squeeze(find(output_store[:,p]==1))\n y_target = int(labels[p])\n \n ax2 = fig.add_subplot(gs[4:6,(k*4):(k*4+4)])\n ax2.text(0.5,0.5,\"{}\".format(y),\n fontsize=\"16\", color=color)\n axis(\"off\")\n \n \n ax3 = fig.add_subplot(gs[6:,(k*4):(k*4+4)])\n ax3.text(0.5,0.5,\"{}\".format(y_target),\n fontsize = \"16\", color=color )\n axis(\"off\")\n \n if k == 9:\n show()", "_____no_output_____" ] ], [ [ "<br><br><br><br><br><br><br><br><br><br><br><br><br><br>\n<br><br><br><br><br><br><br><br><br><br><br><br><br><br>\n<br><br><br><br><br><br><br><br><br><br><br><br><br><br>\nThe next cell is just for styling", "_____no_output_____" ] ], [ [ "from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../style/ipybn.css\", \"r\").read()\n return HTML(styles)\ncss_styling()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d056cf61483a3cc6119671507df06bcf3c918e2e
883,506
ipynb
Jupyter Notebook
examples/BE240_Lecture4_Sub-SBML.ipynb
BuildACell/subsbml
b83c83afc1d673985f44c396c39d2f7aa4382fd0
[ "BSD-3-Clause" ]
4
2018-09-14T06:16:43.000Z
2021-05-02T19:14:05.000Z
examples/BE240_Lecture4_Sub-SBML.ipynb
BuildACell/subsbml
b83c83afc1d673985f44c396c39d2f7aa4382fd0
[ "BSD-3-Clause" ]
18
2018-10-27T02:45:21.000Z
2020-10-19T03:10:52.000Z
examples/BE240_Lecture4_Sub-SBML.ipynb
BuildACell/subsbml
b83c83afc1d673985f44c396c39d2f7aa4382fd0
[ "BSD-3-Clause" ]
4
2019-08-16T00:00:21.000Z
2021-01-09T07:05:09.000Z
1,147.41039
218,684
0.899294
[ [ [ "# BE 240 Lecture 4\n# Sub-SBML \n## Modeling diffusion, shared resources, and compartmentalized systems\n\n## _Ayush Pandey_\n", "_____no_output_____" ] ], [ [ "# This notebook is designed to be converted to a HTML slide show\n# To do this in the command prompt type (in the folder containing the notebook): \n# jupyter nbconvert BE240_Lecture4_Sub-SBML.ipynb --to slides", "_____no_output_____" ] ], [ [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "# An example:\n\n### Three different \"subsystems\" - each with its SBML model\n### Another \"signal in mixture\" subsystem - models signal in the environment / mixture \n### Using Sub-SBML we can obtain the combined model for such a system with\n* transport across membrane\n* shared resources : ATP, Ribosome etc\n* resolve naming conflicts (Ribo, Ribosome, RNAP, RNAPolymerase etc.)\n", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "# Installing Sub-SBML\n\n```\ngit clone https://github.com/BuildACell/subsbml.git\n```\ncd to `subsbml` directory then run the following command to install the package in your environment:\n```\n python setup.py install\n```\n\n# Dependencies:\n1. python-libsbml : Run `pip install python-libsbml`, if you don't have it already. You probably already have this installed as it is also a dependency for bioscrape\n1. A simulator: You will need a simulator of your choice to simulate the SBML models that Sub-SBML generates. Bioscrape is an example of a simulator and we will be using that for simulations.\n\n# Update your bioscrape installation\nFrom the bioscrape directory, run the following if you do not have a remote fork (your own Github fork of the original bioscrape repository - `biocircuits/bioscrape`. To list all remote repositories that your bioscrape directory is connected to you can run `git remote -v`. The `origin` in the next two commands corresponds to the biocircuits/bioscrape github repository (you should change it if your remote has a different name)\n```\ngit pull origin master\n\npython setup.py install\n```\n\n\nUpdate your BioCRNpyler installation as well - if you plan to use your own BioCRNpyler models with Sub-SBML. Run the same commands as for bioscrape from the BioCRNpyler directory. \n", "_____no_output_____" ], [ "## Sub-SBML notes:\n\n## On \"name\" and \"identifier\":\n> SBML elements can have a name and an identifier argument. A `name` is supposed to be a human readable name of the particular element in the model. On the other hand, an `identifier` is what the software tool reads. Hence, `identifier` argument in an SBML model is mandatory whereas `name` argument is optional. \n\nSub-SBML works with `name` arguments of various model components to figure out what components interact/get combined/shared etc. Bioscrape/BioCRNpyler and other common software tools generate SBML models with `name` arguments added to various components such as species, parameters. As an example, to combine two species, Sub-SBML looks at the names of the two species and if they are the same - they are combined together and given a new identifier but the name remains the same. \n\n## A simple Sub-SBML use case:\n\nA simple example where we have two different models : transcription and translation. Using Sub-SBML, we can combine these two together and run simulations. 
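\n\nAs a quick illustration of the name/identifier distinction described above, here is a minimal sketch using python-libsbml (`my_model.xml` is only a placeholder file name, not a file shipped with this tutorial):\n\n```python\nimport libsbml\n\n# Read an SBML file and inspect its first species (placeholder file name)\ndoc = libsbml.readSBML('my_model.xml')\nmodel = doc.getModel()\nsp = model.getSpecies(0)\n\n# 'name' is the human-readable label that Sub-SBML matches on;\n# 'id' is the identifier that software tools reference internally\nprint(sp.getName(), sp.getId())\n```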
", "_____no_output_____" ] ], [ [ "# Import statements\nfrom subsbml.Subsystem import createNewSubsystem, createSubsystem\n\nimport numpy as np\nimport pylab as plt", "_____no_output_____" ] ], [ [ "## Transcription Model:\n\nConsider the following simple transcription-only model where $G$ is a gene, $T$ is a transcript, and $S$ is the signaling molecule.\n\nWe can write the following reduced order dynamics:\n1. $G \\xrightarrow[]{\\rho_{tx}(G, S)} G + T$; \n\\begin{align} \n\\rho_{tx}(G, S) = G K_{X}\\frac{S^{2}}{K_{S}^{2}+S^{2}}\n\\\\\n\\end{align}\nHere, $S$ is the inducer signal that cooperatively activates the transcription of the gene $G$. Since, this is a positive activation of the gene by the inducer, we have a positive proportional Hill function.\n\n1. $T \\xrightarrow[]{\\delta} \\varnothing$; massaction kinetics at rate $\\delta$.\n\n## Translation model:\n\n1. $T \\xrightarrow[]{\\rho_{tl}(T)} T+X$; \n\\begin{align} \n\\rho_{tl}(T) = K_{TR} \\frac{T}{K_{R} + T}\n\\\\\n\\end{align}\nHere $X$ is the protein species.\nThe lumped parameters $K_{TR}$ and $K_R$ model effects due to ribosome saturation. This is the similar Hill function as derived in the enzymatic reaction example. \n\n1. $X \\xrightarrow[]{\\delta} \\varnothing$; massaction kinetics at rate $\\delta$.", "_____no_output_____" ] ], [ [ "# Import SBML models by creating Subsystem class objects\nss1 = createSubsystem('transcription_SBML_model.xml')\nss2 = createSubsystem('translation_SBML_model.xml')\n\nss1.renameSName('mRNA_T', 'T')\n# Combine the two subsystems together\ntx_tl_subsystem = ss1 + ss2\n\n# The longer way to do the same thing: \n\n# tx_tl_subsystem = createNewSubsystem()\n# tx_tl_subsystem.combineSubsystems([ss1,ss2], verbose = True)\n\n# Set signal concentration (input) - manually and get ID for protein X\n\nX_id = tx_tl_subsystem.getSpeciesByName('X').getId()\n# Writing a Subsystem to an SBML file (Export SBML)\n_ = tx_tl_subsystem.writeSBML('txtl_ss.xml')\ntx_tl_subsystem.setSpeciesAmount('S',10)", "_____no_output_____" ], [ "try:\n # Simulate with Bioscrape and plot the result\n timepoints = np.linspace(0,100,100)\n results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)\n plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 10')\n tx_tl_subsystem.setSpeciesAmount('S',5)\n results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)\n plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 5')\n plt.title('Protein X dynamics')\n plt.ylabel('[X]')\n plt.xlabel('Time')\n plt.legend()\n plt.show()\nexcept:\n print('Simulator not found')", "_____no_output_____" ], [ "# Viewing the change log for the changes that Sub-SBML made \n\n# print(ss1.changeLog)\n# print(ss2.changeLog)\nprint(tx_tl_subsystem.changeLog)", "{'default_bioscrape_generated_model_47961': 'default_bioscrape_generated_model_245758_combined', 'default_bioscrape_generated_model_245758': 'default_bioscrape_generated_model_245758_combined', 'mRNA_T_bioscrape_generated_model_245758': 'mRNA_T_bioscrape_generated_model_245758_1_combined', 'T_bioscrape_generated_model_47961': 'mRNA_T_bioscrape_generated_model_245758_1_combined', 'delta_bioscrape_generated_model_245758': 'delta_combined', 'delta_bioscrape_generated_model_47961': 'delta_combined', 'n_bioscrape_generated_model_245758': 'n_combined', 'n_bioscrape_generated_model_47961': 'n_combined'}\n" ] ], [ [ "## Signal induction model:\n\n1. 
$\\varnothing \\xrightarrow[]{\\rho(I)} S$; \n\\begin{align} \n\\rho(S) = K_{0} \\frac{I^2}{K_{I} + I^2}\n\\\\\n\\end{align}\nHere $S$ is the signal produced on induction by an inducer $I$.\nThe lumped parameters $K_{0}$ and $K_S$ model effects of cooperative production of the signal by the inducer. This is the similar Hill function as derived in the enzymatic reaction example. ", "_____no_output_____" ] ], [ [ "ss3 = createSubsystem('signal_in_mixture.xml')\n\n# Signal subsystem (production of signal molecule)\ncombined_ss = ss1 + ss2 + ss3 \n\n# Alternatively\ncombined_ss = createNewSubsystem()\ncombined_ss.combineSubsystems([ss1,ss2,ss3])\n\n# Writing a Subsystem to an SBML file (Export SBML)\ncombined_ss.writeSBML('txtl_combined.xml')\n\n# Set signal concentration (input) - manually and get ID for protein X\ncombined_ss.setSpeciesAmount('I',10)\nX_id = combined_ss.getSpeciesByName('X').getId()", "_____no_output_____" ], [ "try:\n # Simulate with Bioscrape and plot the result\n timepoints = np.linspace(0,100,100)\n results, _ = combined_ss.simulateWithBioscrape(timepoints)\n plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 10')\n combined_ss.setSpeciesAmount('I',2)\n results, _ = combined_ss.simulateWithBioscrape(timepoints)\n plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 5')\n plt.title('Protein X dynamics')\n plt.ylabel('[X]')\n plt.xlabel('Time')\n plt.legend()\n plt.show()\nexcept:\n print('Simulator not found')", "_____no_output_____" ], [ "combined_ss.changeLog", "_____no_output_____" ] ], [ [ "## What does Sub-SBML look for?\n\n1. For compartments: if two compartments have the same `name` and the same `size` attributes => they are combined together.\n1. For species: if two species have the same `name` attribute => they are combined together. If initial amount is not the same, the first amount is set. It is easy to set species amounts later.\n1. For parameters: if two paraemters have the same `name` attribute **and** the same `value` => they are combined together.\n1. For reactions: if two reactions have the same `name` **and** the same reaction string (reactants -> products) => they are combined together. \n1. Other SBML components are also merged.", "_____no_output_____" ], [ "# Utility functions for Subsystems\n1. Set `verbose` keyword argument to `True` to get a list of detailed warning messages that describe the changes being made to the models. Helpful in debugging and creating clean models when combining multiple models.\n1. Use `renameSName` method for a `Subsystem` to rename any species' names throughout a model and `renameSIdRefs` to rename identifiers.\n1. Use `createBasicSubsystem()` function to get a basic \"empty\" subsystem model.\n1. Use `getSpeciesByName` to get all species with a given name in a Subsystem model.\n1. use `shareSubsystems` method similar to `combineSubsystems` method if you are only interested in getting a model with shared resource species combined together. \n1. 
Set `combineNames` keyword argument to `False` in `combineSubsystems` method to combine the Subsystem objects but treating the elements with the same `name` as different.", "_____no_output_____" ], [ "# Modeling transport across membranes \n![image.png](attachment:image.png)", "_____no_output_____" ], [ "## System 1 : TX-TL with IPTG reservoir and no membrane\n", "_____no_output_____" ] ], [ [ "from subsbml.System import System, combineSystems\n\ncell_1 = System('cell_1')\n\nss1 = createSubsystem('txtl_ss.xml')\nss1.renameSName('S', 'IPTG')\nss2 = createSubsystem('IPTG_reservoir.xml')\n\nIPTG_external_conc = ss2.getSpeciesByName('IPTG').getInitialConcentration()\n\ncell_1.setInternal([ss1])\ncell_1.setExternal([ss2])\n# cell_1.setMembrane() # Membrane-less system\nss1.setSpeciesAmount('IPTG', IPTG_external_conc)\n\ncell_1_model = cell_1.getModel() # Get a Subsystem object that represents the combined model for cell_1\ncell_1_model.writeSBML('cell_1_model.xml')", "_____no_output_____" ] ], [ [ "## System 2 : TX-TL with IPTG reservoir and a simple membrane\n\n### Membrane : IPTG external and internal diffusion in a one step reversible reaction", "_____no_output_____" ] ], [ [ "from subsbml import System, createSubsystem, combineSystems, createNewSubsystem\nss1 = createSubsystem('txtl_ss.xml')\nss1.renameSName('S','IPTG')\nss2 = createSubsystem('IPTG_reservoir.xml')\n\n# Create a simple IPTG membrane where IPTG goes in an out of the membrane via a reversible reaction\nmb2 = createSubsystem('membrane_IPTG.xml', membrane = True)\n\n# cell_2 = System('cell_2',ListOfInternalSubsystems = [ss1],\n# ListOfExternalSubsystems = [ss2],\n# ListOfMembraneSubsystems = [mb2])\ncell_2 = System('cell_2')\ncell_2.setInternal(ss1)\ncell_2.setExternal(ss2)\ncell_2.setMembrane(mb2)\ncell_2_model = cell_2.getModel()\ncell_2_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_2_external')\ncell_2_model.writeSBML('cell_2_model.xml')", "The subsystem from membrane_IPTG.xml has multiple compartments\n" ] ], [ [ "## System 3 : TX-TL with IPTG reservoir and a detailed membrane diffusion\n\n### Membrane : IPTG external binds to a transport protein and forms a complex. 
This complex causes the diffusion of IPTG in the internal of the cell.", "_____no_output_____" ] ], [ [ "# Create a more detailed IPTG membrane where IPTG binds to an intermediate transporter protein, forms a complex\n# then transports out of the cell system to the external environment \n\nmb3 = createSubsystem('membrane_IPTG_detailed.xml', membrane = True)\n\ncell_3 = System('cell_3',ListOfInternalSubsystems = [ss1],\n ListOfExternalSubsystems = [ss2],\n ListOfMembraneSubsystems = [mb3])\ncell_3_model = cell_3.getModel()\ncell_3_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_3_external')\ncell_3_model.writeSBML('cell_3_model.xml')\n\ncombined_model = combineSystems([cell_1, cell_2, cell_3])", "The subsystem from membrane_IPTG_detailed.xml has multiple compartments\n" ], [ "try:\n import numpy as np\n import matplotlib.pyplot as plt\n timepoints = np.linspace(0,2,100)\n results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)\n results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)\n results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)\n\n X_id1 = cell_1_model.getSpeciesByName('X').getId()\n X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()\n X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()\n plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')\n plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')\n plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')\n plt.xlabel('Time')\n plt.ylabel('[X]')\n plt.legend()\n plt.show()\n\n timepoints = np.linspace(0,200,100)\n results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)\n results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)\n results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)\n X_id1 = cell_1_model.getSpeciesByName('X').getId()\n X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()\n X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()\n plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')\n plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')\n plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')\n plt.xlabel('Time')\n plt.ylabel('[X]')\n plt.legend()\n plt.show()\nexcept:\n print('Simulator not found')", "_____no_output_____" ] ], [ [ "# Additional Sub-SBML Tools:\n\n* Create SBML models directly using `SimpleModel` class\n* Simulate directly using `bioscrape` or `libRoadRunner` with various simulation options\n* Various utility functions to edit SBML models:\n 1. Change species names/identifiers throughout an SBML model.\n 1. Edit parameter values or species initial conditions easily (directly in an SBML model).\n* `combineSystems` function can be used to combine multiple `System` objects together as shown in the previous cell. Also, a special use case interaction modeling function is available : `connectSubsystems`. Refer to the tutorial_interconnetion.ipynb notebook in the tutorials directory for more information about this.", "_____no_output_____" ], [ "# Things to Try:\n1. Compartmentalize your own SBML model - generate more than 1 model each with a different compartment names. Using tools in this notebook, try to combine your models together and regenerate the expected simulation.\n1. Implement a diffusion model and use it as a membrane model for a `System` of your choice.\n1. 
Implement an even more complicated diffusion model for the above example and run the simulation.\n1. **The package has not been tested extensively, so it would be really helpful if you could raise [issues](https://github.com/BuildACell/subsbml/issues) on GitHub if you face any errors with your models. Also, feel free to send a message on the Slack channel or a DM.**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d056dded81e73c24b952c78889809c27f1cd2ac8
457,559
ipynb
Jupyter Notebook
11-Extended-Kalman-Filters.ipynb
galuardi/Kalman-and-Bayesian-Filters-in-Python
e52e2ca206f36572c5a9823ec9b5762158a35812
[ "CC-BY-4.0" ]
null
null
null
11-Extended-Kalman-Filters.ipynb
galuardi/Kalman-and-Bayesian-Filters-in-Python
e52e2ca206f36572c5a9823ec9b5762158a35812
[ "CC-BY-4.0" ]
null
null
null
11-Extended-Kalman-Filters.ipynb
galuardi/Kalman-and-Bayesian-Filters-in-Python
e52e2ca206f36572c5a9823ec9b5762158a35812
[ "CC-BY-4.0" ]
1
2019-11-19T13:46:33.000Z
2019-11-19T13:46:33.000Z
242.608165
39,664
0.880584
[ [ [ "[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)", "_____no_output_____" ], [ "# The Extended Kalman Filter", "_____no_output_____" ] ], [ [ "#format the book\n%matplotlib inline\nfrom __future__ import division, print_function\nfrom book_format import load_style\nload_style()", "_____no_output_____" ] ], [ [ "At this point in the book we have developed the theory for the linear Kalman filter. Then, in the last two chapters we broached the topic of using Kalman filters for nonlinear problems. In this chapter we will learn the Extended Kalman filter (EKF). The EKF handles nonlinearity by linearizing the system at the point of the current estimate, and then the linear Kalman filter is used to filter this linearized system. It was one of the very first techniques used for nonlinear problems, and it remains the most common technique. \n\nThe EKF provides significant mathematical challenges to the designer of the filter; this is the most challenging chapter of the book. To be honest, I do everything I can to avoid the EKF in favor of other techniques that have been developed to filter nonlinear problems. However, the topic is unavoidable; all classic papers and a majority of current papers in the field use the EKF. Even if you do not use the EKF in your own work you will need to be familiar with the topic to be able to read the literature. ", "_____no_output_____" ], [ "## Linearizing the Kalman Filter\n\nThe Kalman filter uses linear equations, so it does not work with nonlinear problems. Problems can be nonlinear in two ways. First, the process model might be nonlinear. An object falling through the atmosphere encounters drag which reduces its acceleration. The amount of drag varies based on the velocity the object. The resulting behavior is nonlinear - it cannot be modeled with linear equations. Second, the measurements could be nonlinear. For example, a radar gives a range and bearing to a target. We use trigonometry, which is nonlinear, to compute the position of the target.\n\nFor the linear filter we have these equations for the process and measurement models:\n\n$$\\begin{aligned}\\overline{\\mathbf x} &= \\mathbf{Ax} + \\mathbf{Bu} + w_x\\\\\n\\mathbf z &= \\mathbf{Hx} + w_z\n\\end{aligned}$$\n\nFor the nonlinear model these equations must be modified to read:\n\n$$\\begin{aligned}\\overline{\\mathbf x} &= f(\\mathbf x, \\mathbf u) + w_x\\\\\n\\mathbf z &= h(\\mathbf x) + w_z\n\\end{aligned}$$\n\nThe linear expression $\\mathbf{Ax} + \\mathbf{Bu}$ is replaced by a nonlinear function $f(\\mathbf x, \\mathbf u)$, and the linear expression $\\mathbf{Hx}$ is replaced by a nonlinear function $h(\\mathbf x)$.\n\nYou might imagine that we proceed by finding a new set of Kalman filter equations that optimally solve these equations. But if you remember the charts in the **Nonlinear Filtering** chapter you'll recall that passing a Gaussian through a nonlinear function results in a probability distribution that is no longer Gaussian. So this will not work.\n\nThe EKF does not alter the Kalman filter's linear equations. Instead, it *linearizes* the nonlinear equations at the point of the current estimate, and uses this linearization in the linear Kalman filter. \n\n*Linearize* means what it sounds like. We find a line that most closely matches the curve at a defined point. 
The graph below linearizes the parabola $f(x)=x^2−2x$ at $x=1.5$.", "_____no_output_____" ] ], [ [ "import ekf_internal\nekf_internal.show_linearization()", "_____no_output_____" ] ], [ [ "If the curve above is the process model, then the dotted lines shows the linearization of that curve for the estimate $x=1.5$.\n\nWe linearize systems by finding the slope of the curve at the given point:\n\n$$\\begin{aligned}\nf(x) &= x^2 -2x \\\\\n\\frac{df}{dx} &= 2x - 2\n\\end{aligned}$$\n\nand then finding its value at the evaluation point:\n\n$$\\begin{aligned}m &= f'(x=1.5) \\\\&= 2(1.5) - 2 \\\\&= 1\\end{aligned}$$ \n\nOur math will be more complicated because we are working with systems of differential equations. We linearize $f(\\mathbf x, \\mathbf u)$, and $h(\\mathbf x)$ by taking the partial derivatives ($\\frac{\\partial}{\\partial \\mathbf x}$) of each to evaluate $\\mathbf A$ and $\\mathbf H$ at the point $\\mathbf x_t$ and $\\mathbf u_t$. This gives us the the system dynamics matrix and measurement model matrix:\n\n$$\n\\begin{aligned}\n\\mathbf A \n&= {\\frac{\\partial{f(\\mathbf x_t, \\mathbf u_t)}}{\\partial{\\mathbf x}}}\\biggr|_{{\\mathbf x_t},{\\mathbf u_t}} \\\\\n\\mathbf H &= \\frac{\\partial{h(\\mathbf x_t)}}{\\partial{\\mathbf x}}\\biggr|_{\\mathbf x_t} \n\\end{aligned}\n$$", "_____no_output_____" ], [ "Finally, we find the discrete state transition matrix $\\mathbf F$ by using the approximation of the Taylor-series expansion of $e^{\\mathbf A \\Delta t}$:\n\n$$\\mathbf F = e^{\\mathbf A\\Delta t} = \\mathbf{I} + \\mathbf A\\Delta t + \\frac{(\\mathbf A\\Delta t)^2}{2!} + \\frac{(\\mathbf A\\Delta t)^3}{3!} + ... $$\n\nAlternatively, you can use one of the other techniques we learned in the **Kalman Math** chapter. \n\nThis leads to the following equations for the EKF. I placed them beside the equations for the linear Kalman filter, and put boxes around the only changes:\n\n$$\\begin{array}{l|l}\n\\text{linear Kalman filter} & \\text{EKF} \\\\\n\\hline \n& \\boxed{\\mathbf A = {\\frac{\\partial{f(\\mathbf x_t, \\mathbf u_t)}}{\\partial{\\mathbf x}}}\\biggr|_{{\\mathbf x_t},{\\mathbf u_t}}} \\\\\n& \\boxed{\\mathbf F = e^{\\mathbf A \\Delta t}} \\\\\n\\mathbf{\\overline x} = \\mathbf{Fx} + \\mathbf{Bu} & \\boxed{\\mathbf{\\overline x} = f(\\mathbf x, \\mathbf u)} \\\\\n\\mathbf{\\overline P} = \\mathbf{FPF}^\\mathsf{T}+\\mathbf Q & \\mathbf{\\overline P} = \\mathbf{FPF}^\\mathsf{T}+\\mathbf Q \\\\\n\\hline\n& \\boxed{\\mathbf H = \\frac{\\partial{h(\\mathbf x_t)}}{\\partial{\\mathbf x}}\\biggr|_{\\mathbf x_t}} \\\\\n\\textbf{y} = \\mathbf z - \\mathbf{H \\bar{x}} & \\textbf{y} = \\mathbf z -\\mathbf{H \\bar{x}}\\\\\n\\mathbf{K} = \\mathbf{\\bar{P}H}^\\mathsf{T} (\\mathbf{H\\bar{P}H}^\\mathsf{T} + \\mathbf R)^{-1} & \\mathbf{K} = \\mathbf{\\bar{P}H}^\\mathsf{T} (\\mathbf{H\\bar{P}H}^\\mathsf{T} + \\mathbf R)^{-1} \\\\\n\\mathbf x=\\mathbf{\\bar{x}} +\\mathbf{K\\textbf{y}} & \\mathbf x=\\mathbf{\\bar{x}} +\\mathbf{K\\textbf{y}} \\\\\n\\mathbf P= (\\mathbf{I}-\\mathbf{KH})\\mathbf{\\bar{P}} & \\mathbf P= (\\mathbf{I}-\\mathbf{KH})\\mathbf{\\bar{P}}\n\\end{array}$$\n\nWe don't normally use $\\mathbf{Fx}$ to propagate the state for the EKF as the linearization causes inaccuracies. It is typical to compute $\\overline{\\mathbf x}$ using a numerical integration technique such as Euler or Runge Kutta. Thus I wrote $\\mathbf{\\overline x} = f(\\mathbf x, \\mathbf u)$.\n\nI think the easiest way to understand the EKF is to start off with an example. 
After we do a few examples you may want to come back and reread this section.", "_____no_output_____" ], [ "## Example: Tracking a Flying Airplane", "_____no_output_____" ], [ "We will start by simulating tracking an airplane by using ground based radar. We implemented a UKF for this problem in the last chapter. Now we will implement an EKF for the same problem so we can compare both the filter performance and the level of effort required to implement the filter.\n\nRadars work by emitting a beam of radio waves and scanning for a return bounce. Anything in the beam's path will reflects some of the signal back to the radar. By timing how long it takes for the reflected signal to get back to the radar the system can compute the *slant distance* - the straight line distance from the radar installation to the object.\n\nFor this example we want to take the slant range measurement from the radar and compute the horizontal position (distance of aircraft from the radar measured over the ground) and altitude of the aircraft, as in the diagram below.", "_____no_output_____" ] ], [ [ "import ekf_internal\nekf_internal.show_radar_chart()", "_____no_output_____" ] ], [ [ "This gives us the equality $x=\\sqrt{slant^2 - altitude^2}$. ", "_____no_output_____" ], [ "### Design the State Variables", "_____no_output_____" ], [ "We want to track the position of an aircraft assuming a constant velocity and altitude, and measurements of the slant distance to the aircraft. That means we need 3 state variables - horizontal distance, horizonal velocity, and altitude:\n\n$$\\mathbf x = \\begin{bmatrix}\\mathtt{distance} \\\\\\mathtt{velocity}\\\\ \\mathtt{altitude}\\end{bmatrix}= \\begin{bmatrix}x \\\\ \\dot x\\\\ y\\end{bmatrix}$$", "_____no_output_____" ], [ "### Design the Process Model", "_____no_output_____" ], [ "We assume a Newtonian, kinematic system for the aircraft. We've used this model in previous chapters, so by inspection you may recognize that we want\n\n$$\\mathbf F = \\left[\\begin{array}{cc|c} 1 & \\Delta t & 0\\\\\n0 & 1 & 0 \\\\ \\hline\n0 & 0 & 1\\end{array}\\right]$$\n\nI've partioned the matrix into blocks to show the upper left block is a constant velocity model for $x$, and the lower right block is a constant position model for $y$.\n\nHowever, let's practice finding these matrix for a nonlinear system. We model nonlinear systems with a set of differential equations. We need an equation in the form \n\n$$\\dot{\\mathbf x} = \\mathbf{Ax} + \\mathbf{w}$$\nwhere $\\mathbf{w}$ is the system noise. \n\nThe variables $x$ and $y$ are independent so we can compute them separately. The differential equations for motion in one dimension are:\n\n$$\\begin{aligned}v &= \\dot x \\\\\na &= \\ddot{x} = 0\\end{aligned}$$\n\nNow we put the differential equations into state-space form. If this was a second or greater order differential system we would have to first reduce them to an equivalent set of first degree equations. The equations are first order, so we put them in state space matrix form as\n\n$$\\begin{aligned}\\begin{bmatrix}\\dot x \\\\ \\ddot{x}\\end{bmatrix} &= \\begin{bmatrix}0&1\\\\0&0\\end{bmatrix} \\begin{bmatrix}x \\\\ \n\\dot x\\end{bmatrix} \\\\ \\dot{\\mathbf x} &= \\mathbf{Ax}\\end{aligned}$$\nwhere $\\mathbf A=\\begin{bmatrix}0&1\\\\0&0\\end{bmatrix}$. \n\nRecall that $\\mathbf A$ is the *system dynamics matrix*. It describes a set of linear differential equations. From it we must compute the state transition matrix $\\mathbf F$. 
$\\mathbf F$ describes a discrete set of linear equations which compute $\\mathbf x$ for a discrete time step $\\Delta t$.\n\nand solve the following power series expansion of the matrix exponential to linearize the equations at $t$:\n\n$$\\mathbf F(\\Delta t) = e^{\\mathbf A\\Delta t} = \\mathbf{I} + \\mathbf A\\Delta t + \\frac{(\\mathbf A\\Delta t)^2}{2!} + \\frac{(\\mathbf A \\Delta t)^3}{3!} + ... $$\n\n\n$\\mathbf A^2 = \\begin{bmatrix}0&0\\\\0&0\\end{bmatrix}$, so all higher powers of $\\mathbf A$ are also $\\mathbf{0}$. Thus the power series expansion is:\n\n$$\n\\begin{aligned}\n\\mathbf F(\\Delta t) &=\\mathbf{I} + \\mathbf At + \\mathbf{0} \\\\\n&= \\begin{bmatrix}1&0\\\\0&1\\end{bmatrix} + \\begin{bmatrix}0&1\\\\0&0\\end{bmatrix}\\Delta t\\\\\n&= \\begin{bmatrix}1&t\\\\0&1\\end{bmatrix}\n\\end{aligned}$$\n\nThis give us\n$$\n\\begin{aligned}\n\\mathbf{\\overline x} &=\\mathbf{Fx} \\\\\n\\mathbf{\\overline x} &=\\begin{bmatrix}1&\\Delta t\\\\0&1\\end{bmatrix}\\mathbf x\n\\end{aligned}$$\n\nThis is the same result used by the kinematic equations! This exercise was unnecessary other than to illustrate linearizing differential equations. Subsequent examples will require you to use these techniques. ", "_____no_output_____" ], [ "### Design the Measurement Model", "_____no_output_____" ], [ "The measurement function for our filter needs to take the filter state $\\mathbf x$ and turn it into a measurement, which is the slant range distance. We use the Pythagorean theorem to derive\n\n$$h(\\mathbf x) = \\sqrt{x^2 + y^2}$$\n\nThe relationship between the slant distance and the position on the ground is nonlinear due to the square root term. To use it in the EKF we must linearize it. As we discussed above, the best way to linearize an equation at a point is to find its slope, which we do by evaluatiing its partial derivative at a point:\n\n$$\n\\mathbf H = \\frac{\\partial{h(\\mathbf x)}}{\\partial{\\mathbf x}}\\biggr|_{\\mathbf x_t}\n$$\n\nThe partial derivative of a matrix is called a Jacobian, and takes the form \n\n$$\\frac{\\partial \\mathbf H}{\\partial \\mathbf x} = \n\\begin{bmatrix}\n\\frac{\\partial h_1}{\\partial x_1} & \\frac{\\partial h_1}{\\partial x_2} &\\dots \\\\\n\\frac{\\partial h_2}{\\partial x_1} & \\frac{\\partial h_2}{\\partial x_2} &\\dots \\\\\n\\vdots & \\vdots\n\\end{bmatrix}\n$$\n\nIn other words, each element in the matrix is the partial derivative of the function $h$ with respect to the variables $x$. For our problem we have\n\n$$\\mathbf H = \\begin{bmatrix}{\\partial h}/{\\partial x} & {\\partial h}/{\\partial \\dot{x}} & {\\partial h}/{\\partial y}\\end{bmatrix}$$\n\nwhere $h(x) = \\sqrt{x^2 + y^2}$.\n\nSolving each in turn:\n\n$$\\begin{aligned}\n\\frac{\\partial h}{\\partial x} &= \\frac{\\partial}{\\partial x} \\sqrt{x^2 + y^2} \\\\\n&= \\frac{x}{\\sqrt{x^2 + y^2}}\n\\end{aligned}$$\n\nand\n\n$$\\begin{aligned}\n\\frac{\\partial h}{\\partial \\dot{x}} &=\n\\frac{\\partial}{\\partial \\dot{x}} \\sqrt{x^2 + y^2} \\\\ \n&= 0\n\\end{aligned}$$\n\nand\n\n$$\\begin{aligned}\n\\frac{\\partial h}{\\partial y} &= \\frac{\\partial}{\\partial y} \\sqrt{x^2 + y^2} \\\\ \n&= \\frac{y}{\\sqrt{x^2 + y^2}}\n\\end{aligned}$$\n\ngiving us \n\n$$\\mathbf H = \n\\begin{bmatrix}\n\\frac{x}{\\sqrt{x^2 + y^2}} & \n0 &\n&\n\\frac{y}{\\sqrt{x^2 + y^2}}\n\\end{bmatrix}$$\n\nThis may seem daunting, so step back and recognize that all of this math is doing something very simple. We have an equation for the slant range to the airplane which is nonlinear. 
The Kalman filter only works with linear equations, so we need to find a linear equation that approximates $\\mathbf H$. As we discussed above, finding the slope of a nonlinear equation at a given point is a good approximation. For the Kalman filter, the 'given point' is the state variable $\\mathbf x$ so we need to take the derivative of the slant range with respect to $\\mathbf x$. \n\nTo make this more concrete, let's now write a Python function that computes the Jacobian of $\\mathbf H$ for this problem. The `ExtendedKalmanFilter` class will be using this to generate `ExtendedKalmanFilter.H` at each step of the process.", "_____no_output_____" ] ], [ [ "from math import sqrt\ndef HJacobian_at(x):\n \"\"\" compute Jacobian of H matrix at x \"\"\"\n\n horiz_dist = x[0]\n altitude = x[2]\n denom = sqrt(horiz_dist**2 + altitude**2)\n return array ([[horiz_dist/denom, 0., altitude/denom]])", "_____no_output_____" ] ], [ [ "Finally, let's provide the code for $h(\\mathbf x)$", "_____no_output_____" ] ], [ [ "def hx(x):\n \"\"\" compute measurement for slant range that\n would correspond to state x.\n \"\"\"\n \n return (x[0]**2 + x[2]**2) ** 0.5", "_____no_output_____" ] ], [ [ "Now lets write a simulation for our radar.", "_____no_output_____" ] ], [ [ "from numpy.random import randn\nimport math\n\nclass RadarSim(object):\n \"\"\" Simulates the radar signal returns from an object\n flying at a constant altityude and velocity in 1D. \n \"\"\"\n \n def __init__(self, dt, pos, vel, alt):\n self.pos = pos\n self.vel = vel\n self.alt = alt\n self.dt = dt\n \n def get_range(self):\n \"\"\" Returns slant range to the object. Call once \n for each new measurement at dt time from last call.\n \"\"\"\n \n # add some process noise to the system\n self.vel = self.vel + .1*randn()\n self.alt = self.alt + .1*randn()\n self.pos = self.pos + self.vel*self.dt\n \n # add measurement noise\n err = self.pos * 0.05*randn()\n slant_dist = math.sqrt(self.pos**2 + self.alt**2)\n \n return slant_dist + err", "_____no_output_____" ] ], [ [ "### Design Process and Measurement Noise\n\nThe radar returns the range distance. A good radar can achieve accuracy of $\\sigma_{range}= 5$ meters, so we will use that value. This gives us\n\n$$\\mathbf R = \\begin{bmatrix}\\sigma_{range}^2\\end{bmatrix} = \\begin{bmatrix}25\\end{bmatrix}$$\n\n\nThe design of $\\mathbf Q$ requires some discussion. The state $\\mathbf x= \\begin{bmatrix}x & \\dot x & y\\end{bmatrix}^\\mathtt{T}$. The first two elements are position (down range distance) and velocity, so we can use `Q_discrete_white_noise` noise to compute the values for the upper left hand side of $\\mathbf Q$. The third element of $\\mathbf x$ is altitude, which we are assuming is independent of the down range distance. That leads us to a block design of $\\mathbf Q$ of:\n\n$$\\mathbf Q = \\begin{bmatrix}\\mathbf Q_\\mathtt{x} & 0 \\\\ 0 & \\mathbf Q_\\mathtt{y}\\end{bmatrix}$$\n\n\n### Implementation\n\nThe `FilterPy` library provides the class `ExtendedKalmanFilter`. It works very similar to the `KalmanFilter` class we have been using, except that it allows you to provide functions that compute the Jacobian of $\\mathbf H$ and the function $h(\\mathbf x)$. We have already written the code for these two functions, so let's get going.\n\nWe start by importing the filter and creating it. There are 3 variables in `x` and only 1 measurement. 
At the same time we will create our radar simulator.\n\n```python\nfrom filterpy.kalman import ExtendedKalmanFilter\n\nrk = ExtendedKalmanFilter(dim_x=3, dim_z=1)\nradar = RadarSim(dt, pos=0., vel=100., alt=1000.)\n```\n\nWe will initialize the filter near the airplane's actual position\n\n```python\nrk.x = array([radar.pos, radar.vel-10, radar.alt+100])\n```\n\nWe assign the system matrix using the first term of the Taylor series expansion we computed above.\n\n```python\ndt = 0.05\nrk.F = eye(3) + array([[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]])*dt\n```\n\nAfter assigning reasonable values to $\\mathbf R$, $\\mathbf Q$, and $\\mathbf P$ we can run the filter with a simple loop\n\n```python\nfor i in range(int(20/dt)):\n z = radar.get_range()\n rk.update(array([z]), HJacobian_at, hx)\n rk.predict()\n```\n\nAdding some boilerplate code to save and plot the results we get:", "_____no_output_____" ] ], [ [ "from filterpy.common import Q_discrete_white_noise\nfrom filterpy.kalman import ExtendedKalmanFilter\nfrom numpy import eye, array, asarray\nimport numpy as np\n\ndt = 0.05\nrk = ExtendedKalmanFilter(dim_x=3, dim_z=1)\nradar = RadarSim(dt, pos=0., vel=100., alt=1000.)\n\n# make an imperfect starting guess\nrk.x = array([radar.pos-100, radar.vel+100, radar.alt+1000])\n\nrk.F = eye(3) + array([[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]]) * dt\n\nrange_std = 5. # meters\nrk.R = np.diag([range_std**2])\nrk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)\nrk.Q[2,2] = 0.1\nrk.P *= 50\n\nxs, track = [], []\nfor i in range(int(20/dt)):\n z = radar.get_range()\n track.append((radar.pos, radar.vel, radar.alt))\n \n rk.update(array([z]), HJacobian_at, hx)\n xs.append(rk.x)\n rk.predict()\n\nxs = asarray(xs)\ntrack = asarray(track)\ntime = np.arange(0, len(xs)*dt, dt)\nekf_internal.plot_radar(xs, track, time)", "_____no_output_____" ] ], [ [ "## Using SymPy to compute Jacobians", "_____no_output_____" ], [ "Depending on your experience with derivatives you may have found the computation of the Jacobian difficult. Even if you found it easy, a slightly more difficult problem easily leads to very difficult computations.\n\nAs explained in Appendix A, we can use the SymPy package to compute the Jacobian for us.", "_____no_output_____" ] ], [ [ "import sympy\nsympy.init_printing(use_latex=True)\n\nx, x_vel, y = sympy.symbols('x, x_vel y')\n\nH = sympy.Matrix([sympy.sqrt(x**2 + y**2)])\n\nstate = sympy.Matrix([x, x_vel, y])\nH.jacobian(state)", "_____no_output_____" ] ], [ [ "This result is the same as the result we computed above, and with much less effort on our part!", "_____no_output_____" ], [ "## Robot Localization\n\nSo, time to try a real problem. I warn you that this is far from a simple problem. However, most books choose simple, textbook problems with simple answers, and you are left wondering how to implement a real world solution. \n\nWe will consider the problem of robot localization. We already implemented this in the **Unscented Kalman Filter** chapter, and I recommend you read that first. In this scenario we have a robot that is moving through a landscape with sensors that give range and bearings to various landmarks. This could be a self driving car using computer vision to identify trees, buildings, and other landmarks. It might be one of those small robots that vacuum your house, or a robot in a warehouse.\n\nOur robot has 4 wheels configured the same as an automobile. It maneuvers by pivoting the front wheels. This causes the robot to pivot around the rear axle while moving forward. 
This is nonlinear behavior which we will have to model. \n\nThe robot has a sensor that gives it approximate range and bearing to known targets in the landscape. This is nonlinear because computing a position from a range and bearing requires square roots and trigonometry. \n\nBoth the process model and measurement models are nonlinear. The UKF accommodates both, so we provisionally conclude that the UKF is a viable choice for this problem.\n\n### Robot Motion Model", "_____no_output_____" ], [ "At a first approximation an automobile steers by pivoting the front tires while moving forward. The front of the car moves in the direction that the wheels are pointing while pivoting around the rear tires. This simple description is complicated by issues such as slippage due to friction, the differing behavior of the rubber tires at different speeds, and the need for the outside tire to travel a different radius than the inner tire. Accurately modeling steering requires a complicated set of differential equations. \n\nFor Kalman filtering, especially for lower speed robotic applications a simpler *bicycle model* has been found to perform well. This is a depiction of the model:", "_____no_output_____" ] ], [ [ "ekf_internal.plot_bicycle()", "_____no_output_____" ] ], [ [ "In the **Unscented Kalman Filter** chapter we derived these equations describing for this model:\n\n$$\\begin{aligned} x &= x - R\\sin(\\theta) + R\\sin(\\theta + \\beta) \\\\\ny &= y + R\\cos(\\theta) - R\\cos(\\theta + \\beta) \\\\\n\\theta &= \\theta + \\beta\n\\end{aligned}\n$$\n\nwhere $\\theta$ is the robot's heading.\n\nYou do not need to understand this model in detail if you are not interested in steering models. The important thing to recognize is that our motion model is nonlinear, and we will need to deal with that with our Kalman filter.", "_____no_output_____" ], [ "### Design the State Variables\n\nFor our robot we will maintain the position and orientation of the robot:\n\n$$\\mathbf x = \\begin{bmatrix}x \\\\ y \\\\ \\theta\\end{bmatrix}$$\n\nOur control input $\\mathbf u$ is the velocity $v$ and steering angle $\\alpha$:\n\n$$\\mathbf u = \\begin{bmatrix}v \\\\ \\alpha\\end{bmatrix}$$", "_____no_output_____" ], [ "### Design the System Model\n\nIn general we model our system as a nonlinear motion model plus noise.\n\n$$\\overline x = x + f(x, u) + \\mathcal{N}(0, Q)$$\n\nUsing the motion model for a robot that we created above, we can expand this to\n\n$$\\overline{\\begin{bmatrix}x\\\\y\\\\\\theta\\end{bmatrix}} = \\begin{bmatrix}x\\\\y\\\\\\theta\\end{bmatrix} + \n\\begin{bmatrix}- R\\sin(\\theta) + R\\sin(\\theta + \\beta) \\\\\nR\\cos(\\theta) - R\\cos(\\theta + \\beta) \\\\\n\\beta\\end{bmatrix}$$\n\nWe linearize this with a taylor expansion at $x$:\n\n$$f(x, u) \\approx \\mathbf x + \\frac{\\partial f(\\mathbf x, \\mathbf u)}{\\partial x}\\biggr|_{\\mathbf x, \\mathbf u} $$\n\nWe replace $f(x, u)$ with our state estimate $\\mathbf x$, and the derivative is the Jacobian of $f$.", "_____no_output_____" ], [ "The Jacobian $\\mathbf F$ is\n\n$$\\mathbf F = \\frac{\\partial f(x, u)}{\\partial x} =\\begin{bmatrix}\n\\frac{\\partial \\dot x}{\\partial x} & \n\\frac{\\partial \\dot x}{\\partial y} &\n\\frac{\\partial \\dot x}{\\partial \\theta}\\\\\n\\frac{\\partial \\dot y}{\\partial x} & \n\\frac{\\partial \\dot y}{\\partial y} &\n\\frac{\\partial \\dot y}{\\partial \\theta} \\\\\n\\frac{\\partial \\dot{\\theta}}{\\partial x} & \n\\frac{\\partial \\dot{\\theta}}{\\partial y} &\n\\frac{\\partial 
\\dot{\\theta}}{\\partial \\theta}\n\\end{bmatrix}\n$$\n\nWhen we calculate these we get\n\n$$\\mathbf F = \\begin{bmatrix}\n1 & 0 & -R\\cos(\\theta) + R\\cos(\\theta+\\beta) \\\\\n0 & 1 & -R\\sin(\\theta) + R\\sin(\\theta+\\beta) \\\\\n0 & 0 & 1\n\\end{bmatrix}$$\n\nWe can double check our work with SymPy.", "_____no_output_____" ] ], [ [ "import sympy\nfrom sympy.abc import alpha, x, y, v, w, R, theta\nfrom sympy import symbols, Matrix\nsympy.init_printing(use_latex=\"mathjax\", fontsize='16pt')\ntime = symbols('t')\nd = v*time\nbeta = (d/w)*sympy.tan(alpha)\nr = w/sympy.tan(alpha)\n\nfxu = Matrix([[x-r*sympy.sin(theta) + r*sympy.sin(theta+beta)],\n [y+r*sympy.cos(theta)- r*sympy.cos(theta+beta)],\n [theta+beta]])\nJ = fxu.jacobian(Matrix([x, y, theta]))\nJ", "_____no_output_____" ] ], [ [ "That looks a bit complicated. We can use SymPy to substitute terms:", "_____no_output_____" ] ], [ [ "# reduce common expressions\nB, R = symbols('beta, R')\nJ = J.subs((d/w)*sympy.tan(alpha), B)\nJ.subs(w/sympy.tan(alpha), R)", "_____no_output_____" ] ], [ [ "In that form we can see that our computation of the Jacobian is correct.\n\nNow we can turn our attention to the noise. Here, the noise is in our control input, so it is in *control space*. In other words, we command a specific velocity and steering angle, but we need to convert that into errors in $x, y, \\theta$. In a real system this might vary depending on velocity, so it will need to be recomputed for every prediction. I will choose this as the noise model; for a real robot you will need to choose a model that accurately depicts the error in your system. \n\n$$\\mathbf{M} = \\begin{bmatrix}\\sigma_{vel}^2 & 0 \\\\ 0 & \\sigma_\\alpha^2\\end{bmatrix}$$\n\nIf this was a linear problem we would convert from control space to state space using the by now familiar $\\mathbf{FMF}^\\mathsf T$ form. Since our motion model is nonlinear we do not try to find a closed form solution to this, but instead linearize it with a Jacobian which we will name $\\mathbf{V}$. \n\n$$\\mathbf{V} = \\frac{\\partial f(x, u)}{\\partial u} \\begin{bmatrix}\n\\frac{\\partial \\dot x}{\\partial v} & \\frac{\\partial \\dot x}{\\partial \\alpha} \\\\\n\\frac{\\partial \\dot y}{\\partial v} & \\frac{\\partial \\dot y}{\\partial \\alpha} \\\\\n\\frac{\\partial \\dot{\\theta}}{\\partial v} & \\frac{\\partial \\dot{\\theta}}{\\partial \\alpha}\n\\end{bmatrix}$$\n\nThese partial derivatives become very difficult to work with. Let's compute them with SymPy. ", "_____no_output_____" ] ], [ [ "V = fxu.jacobian(Matrix([v, alpha]))\nV = V.subs(sympy.tan(alpha)/w, 1/R) \nV = V.subs(time*v/R, B)\nV = V.subs(time*v, 'd')\nV", "_____no_output_____" ] ], [ [ "This should give you an appreciation of how quickly the EKF become mathematically intractable. \n\nThis gives us the final form of our prediction equations:\n\n$$\\begin{aligned}\n\\mathbf{\\overline x} &= \\mathbf x + \n\\begin{bmatrix}- R\\sin(\\theta) + R\\sin(\\theta + \\beta) \\\\\nR\\cos(\\theta) - R\\cos(\\theta + \\beta) \\\\\n\\beta\\end{bmatrix}\\\\\n\\mathbf{\\overline P} &=\\mathbf{FPF}^{\\mathsf T} + \\mathbf{VMV}^{\\mathsf T}\n\\end{aligned}$$\n\nOne final point. This form of linearization is not the only way to predict $\\mathbf x$. For example, we could use a numerical integration technique like *Runge Kutta* to compute the position of the robot in the future. In fact, if the time step is relatively large you will have to do that. 
As I am sure you are realizing, things are not as cut and dried with the EKF as it was for the KF. For a real problem you have to very carefully model your system with differential equations and then determine the most appropriate way to solve that system. The correct approach depends on the accuracy you require, how nonlinear the equations are, your processor budget, and numerical stability concerns. These are all topics beyond the scope of this book.", "_____no_output_____" ], [ "### Design the Measurement Model\n\nNow we need to design our measurement model. For this problem we are assuming that we have a sensor that receives a noisy bearing and range to multiple known locations in the landscape. The measurement model must convert the state $\\begin{bmatrix}x & y&\\theta\\end{bmatrix}^\\mathsf T$ into a range and bearing to the landmark. Using $p$ be the position of a landmark, the range $r$ is\n\n$$r = \\sqrt{(p_x - x)^2 + (p_y - y)^2}$$\n\nWe assume that the sensor provides bearing relative to the orientation of the robot, so we must subtract the robot's orientation from the bearing to get the sensor reading, like so:\n\n$$\\phi = \\arctan(\\frac{p_y - y}{p_x - x}) - \\theta$$\n\n\nThus our function is\n\n\n$$\\begin{aligned}\n\\mathbf x& = h(x,p) &+ \\mathcal{N}(0, R)\\\\\n&= \\begin{bmatrix}\n\\sqrt{(p_x - x)^2 + (p_y - y)^2} \\\\\n\\arctan(\\frac{p_y - y}{p_x - x}) - \\theta \n\\end{bmatrix} &+ \\mathcal{N}(0, R)\n\\end{aligned}$$\n\nThis is clearly nonlinear, so we need linearize $h(x, p)$ at $\\mathbf x$ by taking its Jacobian. We compute that with SymPy below.", "_____no_output_____" ] ], [ [ "px, py = symbols('p_x, p_y')\nz = Matrix([[sympy.sqrt((px-x)**2 + (py-y)**2)],\n [sympy.atan2(py-y, px-x) - theta]])\nz.jacobian(Matrix([x, y, theta]))", "_____no_output_____" ] ], [ [ "Now we need to write that as a Python function. For example we might write:", "_____no_output_____" ] ], [ [ "from math import sqrt\n\ndef H_of(x, landmark_pos):\n \"\"\" compute Jacobian of H matrix where h(x) computes \n the range and bearing to a landmark for state x \"\"\"\n\n px = landmark_pos[0]\n py = landmark_pos[1]\n hyp = (px - x[0, 0])**2 + (py - x[1, 0])**2\n dist = sqrt(hyp)\n\n H = array(\n [[-(px - x[0, 0]) / dist, -(py - x[1, 0]) / dist, 0],\n [ (py - x[1, 0]) / hyp, -(px - x[0, 0]) / hyp, -1]])\n return H", "_____no_output_____" ] ], [ [ "We also need to define a function that converts the system state into a measurement.", "_____no_output_____" ] ], [ [ "from math import atan2\ndef Hx(x, landmark_pos):\n \"\"\" takes a state variable and returns the measurement\n that would correspond to that state.\n \"\"\"\n px = landmark_pos[0]\n py = landmark_pos[1]\n dist = sqrt((px - x[0, 0])**2 + (py - x[1, 0])**2)\n\n Hx = array([[dist],\n [atan2(py - x[1, 0], px - x[0, 0]) - x[2, 0]]])\n return Hx", "_____no_output_____" ] ], [ [ "### Design Measurement Noise\n\nThis is quite straightforward as we need to specify measurement noise in measurement space, hence it is linear. It is reasonable to assume that the range and bearing measurement noise is independent, hence\n\n$$R=\\begin{bmatrix}\\sigma_{range}^2 & 0 \\\\ 0 & \\sigma_{bearing}^2\\end{bmatrix}$$\n\n### Implementation\n\nWe will use `FilterPy`'s `ExtendedKalmanFilter` class to implement the filter. Its `predict()` method uses the standard linear equations. Our process model is nonlinear, so we will have to override `predict()` with our own version. 
I'll want to also use this class to simulate the robot, so I'll add a method `move()` that computes the position of the robot which both `predict()` and my simulation can call.\n\nThe matrices for the prediction step are quite large. While writing this code I made several errors before I finally got it working. I only found my errors by using SymPy's `evalf` function, which allows you to evaluate a SymPy `Matrix` with specific values for the variables. I decided to demonstrate this technique, and to eliminate a possible source of bugs, by using SymPy in the Kalman filter. You'll need to understand a couple of points.\n\nFirst, `evalf` uses a dictionary to pass in the values you want to use. For example, if your matrix contains an `x` and `y`, you can write\n\n```python\n M.evalf(subs={x:3, y:17})\n```\n \nto evaluate the matrix for `x=3` and `y=17`. \n\nSecond, `evalf` returns a `sympy.Matrix` object. Use `numpy.array(M).astype(float)` to convert it to a NumPy array. `numpy.array(M)` creates an array of type `object`, which is not what you want.\n\nHere is the code for the EKF:", "_____no_output_____" ] ], [ [ "from filterpy.kalman import ExtendedKalmanFilter as EKF\nfrom numpy import dot, array, sqrt\nclass RobotEKF(EKF):\n def __init__(self, dt, wheelbase, std_vel, std_steer):\n EKF.__init__(self, 3, 2, 2)\n self.dt = dt\n self.wheelbase = wheelbase\n self.std_vel = std_vel\n self.std_steer = std_steer\n\n a, x, y, v, w, theta, time = symbols(\n 'a, x, y, v, w, theta, t')\n d = v*time\n beta = (d/w)*sympy.tan(a)\n r = w/sympy.tan(a)\n \n self.fxu = Matrix(\n [[x-r*sympy.sin(theta)+r*sympy.sin(theta+beta)],\n [y+r*sympy.cos(theta)-r*sympy.cos(theta+beta)],\n [theta+beta]])\n\n self.F_j = self.fxu.jacobian(Matrix([x, y, theta]))\n self.V_j = self.fxu.jacobian(Matrix([v, a]))\n\n # save dictionary and it's variables for later use\n self.subs = {x: 0, y: 0, v:0, a:0, \n time:dt, w:wheelbase, theta:0}\n self.x_x, self.x_y, = x, y \n self.v, self.a, self.theta = v, a, theta\n\n def predict(self, u=0):\n self.x = self.move(self.x, u, self.dt)\n\n self.subs[self.theta] = self.x[2, 0]\n self.subs[self.v] = u[0]\n self.subs[self.a] = u[1]\n\n F = array(self.F_j.evalf(subs=self.subs)).astype(float)\n V = array(self.V_j.evalf(subs=self.subs)).astype(float)\n\n # covariance of motion noise in control space\n M = array([[self.std_vel*u[0]**2, 0], \n [0, self.std_steer**2]])\n\n self.P = dot(F, self.P).dot(F.T) + dot(V, M).dot(V.T)\n\n def move(self, x, u, dt):\n hdg = x[2, 0]\n vel = u[0]\n steering_angle = u[1]\n dist = vel * dt\n\n if abs(steering_angle) > 0.001: # is robot turning?\n beta = (dist / self.wheelbase) * tan(steering_angle)\n r = self.wheelbase / tan(steering_angle) # radius\n\n dx = np.array([[-r*sin(hdg) + r*sin(hdg + beta)], \n [r*cos(hdg) - r*cos(hdg + beta)], \n [beta]])\n else: # moving in straight line\n dx = np.array([[dist*cos(hdg)], \n [dist*sin(hdg)], \n [0]])\n return x + dx", "_____no_output_____" ] ], [ [ "Now we have another issue to handle. The residual is notionally computed as $y = z - h(x)$ but this will not work because our measurement contains an angle in it. Suppose z has a bearing of $1^\\circ$ and $h(x)$ has a bearing of $359^\\circ$. Naively subtracting them would yield a bearing difference of $-358^\\circ$, which will throw off the computation of the Kalman gain. The correct angle difference in this case is $2^\\circ$. 
So we will have to write code to correctly compute the bearing residual.", "_____no_output_____" ] ], [ [ "def residual(a, b):\n \"\"\" compute residual (a-b) between measurements containing \n [range, bearing]. Bearing is normalized to [-pi, pi)\"\"\"\n y = a - b\n y[1] = y[1] % (2 * np.pi) # force in range [0, 2 pi)\n if y[1] > np.pi: # move to [-pi, pi)\n y[1] -= 2 * np.pi\n return y", "_____no_output_____" ] ], [ [ "The rest of the code runs the simulation and plots the results, and shouldn't need too much comment by now. I create a variable `landmarks` that contains the coordinates of the landmarks. I update the simulated robot position 10 times a second, but run the EKF only once. This is for two reasons. First, we are not using Runge Kutta to integrate the differental equations of motion, so a narrow time step allows our simulation to be more accurate. Second, it is fairly normal in embedded systems to have limited processing speed. This forces you to run your Kalman filter only as frequently as absolutely needed.", "_____no_output_____" ] ], [ [ "from filterpy.stats import plot_covariance_ellipse\nfrom math import sqrt, tan, cos, sin, atan2\nimport matplotlib.pyplot as plt\n\ndt = 1.0\n\ndef z_landmark(lmark, sim_pos, std_rng, std_brg):\n x, y = sim_pos[0, 0], sim_pos[1, 0]\n d = np.sqrt((lmark[0] - x)**2 + (lmark[1] - y)**2) \n a = atan2(lmark[1] - y, lmark[0] - x) - sim_pos[2, 0]\n z = np.array([[d + randn()*std_rng],\n [a + randn()*std_brg]])\n return z\n\ndef ekf_update(ekf, z, landmark):\n ekf.update(z, HJacobian=H_of, Hx=Hx, \n residual=residual,\n args=(landmark), hx_args=(landmark))\n \n \ndef run_localization(landmarks, std_vel, std_steer, \n std_range, std_bearing,\n step=10, ellipse_step=20, ylim=None):\n ekf = RobotEKF(dt, wheelbase=0.5, std_vel=std_vel, \n std_steer=std_steer)\n ekf.x = array([[2, 6, .3]]).T # x, y, steer angle\n ekf.P = np.diag([.1, .1, .1])\n ekf.R = np.diag([std_range**2, std_bearing**2])\n\n sim_pos = ekf.x.copy() # simulated position\n # steering command (vel, steering angle radians)\n u = array([1.1, .01]) \n\n plt.scatter(landmarks[:, 0], landmarks[:, 1],\n marker='s', s=60)\n \n track = []\n for i in range(200):\n sim_pos = ekf.move(sim_pos, u, dt/10.) # simulate robot\n track.append(sim_pos)\n\n if i % step == 0:\n ekf.predict(u=u)\n\n if i % ellipse_step == 0:\n plot_covariance_ellipse(\n (ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2], \n std=6, facecolor='k', alpha=0.3)\n\n x, y = sim_pos[0, 0], sim_pos[1, 0]\n for lmark in landmarks:\n z = z_landmark(lmark, sim_pos,\n std_range, std_bearing)\n ekf_update(ekf, z, lmark)\n\n if i % ellipse_step == 0:\n plot_covariance_ellipse(\n (ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],\n std=6, facecolor='g', alpha=0.8)\n track = np.array(track)\n plt.plot(track[:, 0], track[:,1], color='k', lw=2)\n plt.axis('equal')\n plt.title(\"EKF Robot localization\")\n if ylim is not None: plt.ylim(*ylim)\n plt.show()\n return ekf", "_____no_output_____" ], [ "landmarks = array([[5, 10], [10, 5], [15, 15]])\n\nekf = run_localization(\n landmarks, std_vel=0.1, std_steer=np.radians(1),\n std_range=0.3, std_bearing=0.1)\nprint('Final P:', ekf.P.diagonal())", "_____no_output_____" ] ], [ [ "I have plotted the landmarks as solid squares. The path of the robot is drawn with black line. The covariance ellipses for the predict step is light gray, and the covariances of the update are shown in green. 
To make them visible at this scale I have set the ellipse boundary at 6$\\sigma$.\n\nFrom this we can see that there is a lot of uncertainty added by our motion model, and that most of the error in in the direction of motion. We can see that from the shape of the blue ellipses. After a few steps we can see that the filter incorporates the landmark measurements.\n\nI used the same initial conditions and landmark locations in the UKF chapter. You can see both in the plot and in the printed final value for $\\mathbf P$ that the UKF achieves much better accuracy in terms of the error ellipse. The black solid line denotes the robot's actual path. Both perform roughly as well as far as their estimate for $\\mathbf x$ is concerned. \n\nNow lets add another landmark.", "_____no_output_____" ] ], [ [ "landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5]])\n\nekf = run_localization(\n landmarks, std_vel=0.1, std_steer=np.radians(1),\n std_range=0.3, std_bearing=0.1)\nplt.show()\nprint('Final P:', ekf.P.diagonal())", "_____no_output_____" ] ], [ [ "The uncertainly in the estimates near the end of the track are smaller with the additional landmark. We can see the fantastic effect that multiple landmarks has on our uncertainty by only using the first two landmarks.", "_____no_output_____" ] ], [ [ "ekf = run_localization(\n landmarks[0:2], std_vel=1.e-10, std_steer=1.e-10,\n std_range=1.4, std_bearing=.05)\nprint('Final P:', ekf.P.diagonal())", "_____no_output_____" ] ], [ [ "The estimate quickly diverges from the robot's path after passing the landmarks. The covariance also grows quickly. Let's see what happens with only one landmark:", "_____no_output_____" ] ], [ [ "ekf = run_localization(\n landmarks[0:1], std_vel=1.e-10, std_steer=1.e-10,\n std_range=1.4, std_bearing=.05)\nprint('Final P:', ekf.P.diagonal())", "_____no_output_____" ] ], [ [ "As you probably suspected, only one landmark produces a very bad result. Conversely, a large number of landmarks allows us to make very accurate estimates.", "_____no_output_____" ] ], [ [ "landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5], [15, 10], \n [10,14], [23, 14], [25, 20], [10, 20]])\n\nekf = run_localization(\n landmarks, std_vel=0.1, std_steer=np.radians(1),\n std_range=0.3, std_bearing=0.1, ylim=(0, 21))\nprint('Final P:', ekf.P.diagonal())", "_____no_output_____" ] ], [ [ "### Discussion\n\nI said that this was a 'real' problem, and in some ways it is. I've seen alternative presentations that used robot motion models that led to much easier Jacobians. On the other hand, my model of a automobile's movement is itself simplistic in several ways. First, it uses a bicycle model. A real car has two sets of tires, and each travels on a different radius. The wheels do not grip the surface perfectly. I also assumed that the robot responds instantaneously to the control input. Sebastian Thrun writes in *Probabilistic Robots* that simplified models are justified because the filters perform well when used to track real vehicles. The lesson here is that while you have to have a reasonably accurate nonlinear model, it does not need to be perfect to operate well. As a designer you will need to balance the fidelity of your model with the difficulty of the math and the computation required to implement the equations. \n\nAnother way in which this problem was simplistic is that we assumed that we knew the correspondance between the landmarks and measurements. 
But suppose we are using radar - how would we know that a specific signal return corresponded to a specific building in the local scene? This question hints at SLAM algorithms - simultaneous localization and mapping. SLAM is not the point of this book, so I will not elaborate on this topic. ", "_____no_output_____" ], [ "## UKF vs EKF\n\nI implemented this tracking problem using the UKF in the previous chapter. The difference in implementation should be very clear. Computing the Jacobians for the state and measurement models was not trivial despite a rudimentary motion model. I am justified in using this model because the research resulting from the DARPA car challenges has shown that it works well in practice. A different problem could result in a Jacobian which is difficult or impossible to derive analytically. In contrast, the UKF only requires you to provide a function that computes the system motion model and another for the measurement model. \n\nThere are many cases where the Jacobian cannot be found analytically. The details are beyond the scope of this book, but you will have to use numerical methods to compute the Jacobian. That is a very nontrivial undertaking, and you will spend a significant portion of a master's degree at a STEM school learning techniques to handle such situations. Even then you'll likely only be able to solve problems related to your field - an aeronautical engineer learns a lot about Navier Stokes equations, but not much about modelling chemical reaction rates. \n\nSo, UKFs are easy. Are they accurate? In practice they often perform better than the EKF. You can find plenty of research papers that prove that the UKF outperforms the EKF in various problem domains. It's not hard to understand why this would be true. The EKF works by linearizing the system model and measurement model at a single point, and the UKF uses $2n+1$ points.\n\nLet's look at a specific example. Take $f(x) = x^3$ and pass a Gaussian distribution through it. I will compute an accurate answer using a monte carlo simulation. I generate 50,000 points randomly distributed according to the Gaussian, pass each through $f(x)$, then compute the mean and variance of the result. \n\nFirst, let's see how the EKF fairs. The EKF linearizes the function by taking the derivative and evaluating it the mean $x$ to get the slope tangent to the function at that point. This slope becomes the linear function that we use to transform the Gaussian. Here is a plot of that.", "_____no_output_____" ] ], [ [ "import nonlinear_plots\nnonlinear_plots.plot_ekf_vs_mc()", "_____no_output_____" ] ], [ [ "We can see from both the graph and the print out at the bottom that the EKF has introduced quite a bit of error.\n\nIn contrast, here is the performance of the UKF:", "_____no_output_____" ] ], [ [ "nonlinear_plots.plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.)", "_____no_output_____" ] ], [ [ "Here we can see that the computation of the UKF's mean is accurate to 2 decimal places. The standard deviation is slightly off, but you can also fine tune how the UKF computes the distribution by using the $\\alpha$, $\\beta$, and $\\gamma$ parameters for generating the sigma points. Here I used $\\alpha=0.001$, $\\beta=3$, and $\\gamma=1$. Feel free to modify them in the function call to see the result. You should be able to get better results than I did. However, avoid over-tuning the UKF for a specific test. 
It may perform better for your test case, but worse in general.\n\nThis is a contrived example, but as I said, the literature is filled with detailed studies of real-world problems that exhibit similar performance differences between the two filters.", "_____no_output_____" ] ] ]
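As a quick numeric check of the bearing-residual discussion in the notebook above, the standalone sketch below (the helper name `normalize_angle` is mine, not part of the notebook) shows why naive subtraction of bearings fails:

```python
import numpy as np

def normalize_angle(rad):
    """Map an angle in radians into the interval [-pi, pi)."""
    rad = rad % (2 * np.pi)      # force into [0, 2*pi)
    if rad >= np.pi:             # shift the upper half down to [-pi, 0)
        rad -= 2 * np.pi
    return rad

# A measured bearing of 1 degree and a predicted bearing of 359 degrees
# differ by 2 degrees, but naive subtraction reports roughly -358 degrees.
z_bearing = np.radians(1.0)
hx_bearing = np.radians(359.0)
print(np.degrees(z_bearing - hx_bearing))                   # about -358
print(np.degrees(normalize_angle(z_bearing - hx_bearing)))  # about 2
```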
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d056df17807050d0ad07093677820fd504240d83
23,132
ipynb
Jupyter Notebook
notebook/MIRNet_Low_Light_Train.ipynb
venkat2319/MIRnet
9b87594d5f0e66c8c08a13ffcd956a7a6f7138d9
[ "Apache-2.0" ]
null
null
null
notebook/MIRNet_Low_Light_Train.ipynb
venkat2319/MIRnet
9b87594d5f0e66c8c08a13ffcd956a7a6f7138d9
[ "Apache-2.0" ]
null
null
null
notebook/MIRNet_Low_Light_Train.ipynb
venkat2319/MIRnet
9b87594d5f0e66c8c08a13ffcd956a7a6f7138d9
[ "Apache-2.0" ]
null
null
null
45.535433
297
0.41769
[ [ [ "<a href=\"https://colab.research.google.com/github/soumik12345/MIRNet/blob/master/notebooks/MIRNet_Low_Light_Train.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!nvidia-smi", "Tue Dec 1 07:16:36 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 455.38 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 66C P8 11W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "!git clone https://github.com/venkat2319/MIRnet\n%cd MIRNet", "Cloning into 'MIRNet'...\nremote: Enumerating objects: 138, done.\u001b[K\nremote: Counting objects: 100% (138/138), done.\u001b[K\nremote: Compressing objects: 100% (104/104), done.\u001b[K\nremote: Total 138 (delta 52), reused 106 (delta 25), pack-reused 0\u001b[K\nReceiving objects: 100% (138/138), 9.84 MiB | 14.21 MiB/s, done.\nResolving deltas: 100% (52/52), done.\n/content/MIRNet\n" ], [ "!pip install -qq wandb", "\u001b[K |████████████████████████████████| 1.8MB 23.2MB/s \n\u001b[K |████████████████████████████████| 102kB 14.7MB/s \n\u001b[K |████████████████████████████████| 133kB 60.6MB/s \n\u001b[K |████████████████████████████████| 102kB 13.9MB/s \n\u001b[K |████████████████████████████████| 163kB 59.2MB/s \n\u001b[K |████████████████████████████████| 71kB 11.6MB/s \n\u001b[?25h Building wheel for watchdog (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for subprocess32 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for pathtools (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n" ], [ "from glob import glob\nimport tensorflow as tf\nfrom mirnet.train import LowLightTrainer\nfrom mirnet.utils import init_wandb, download_dataset\n\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)", "_____no_output_____" ], [ "download_dataset('LOL')", "Downloading dataset...\n" ], [ "init_wandb(\n project_name='mirnet',\n experiment_name='LOL_lowlight_experiment_2_256x256',\n wandb_api_key='cf0947ccde62903d4df0742a58b8a54ca4c11673'\n)", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33m19soumik-rakshit96\u001b[0m (use `wandb login --relogin` to force relogin)\n" ], [ "trainer = LowLightTrainer()\ntrain_low_light_images = glob('./our485/low/*')\ntrain_high_light_images = glob('./our485/high/*')\nvalid_low_light_images = glob('./eval15/low/*')\nvalid_high_light_images = glob('./eval15/high/*')", "_____no_output_____" ], [ "trainer.build_dataset(\n train_low_light_images, train_high_light_images,\n valid_low_light_images, valid_high_light_images,\n crop_size=256, batch_size=2\n)\ntrainer.compile()", "_____no_output_____" ], [ "trainer.train(epochs=100, checkpoint_dir='./checkpoints')", "Epoch 1/100\n243/243 [==============================] - 442s 2s/step - loss: 0.1592 - psnr: 64.1155 - val_loss: 0.1249 - val_psnr: 65.5065\nEpoch 2/100\n243/243 [==============================] - 433s 2s/step - loss: 0.1509 - psnr: 64.5416 - val_loss: 0.1429 - val_psnr: 64.6090\nEpoch 3/100\n243/243 [==============================] - 431s 2s/step - loss: 0.1439 - psnr: 65.2337 - val_loss: 0.1303 - val_psnr: 65.2745\nEpoch 4/100\n243/243 [==============================] - 436s 2s/step - loss: 0.1285 - psnr: 65.8450 - val_loss: 0.1140 - val_psnr: 66.2238\nEpoch 5/100\n243/243 [==============================] - 438s 2s/step - loss: 0.1194 - psnr: 66.4395 - val_loss: 0.1135 - val_psnr: 66.3191\nEpoch 6/100\n243/243 [==============================] - 437s 2s/step - loss: 0.1102 - psnr: 67.0214 - val_loss: 0.1049 - val_psnr: 66.9083\nEpoch 7/100\n243/243 [==============================] - 430s 2s/step - loss: 0.1060 - psnr: 67.3232 - val_loss: 0.1132 - val_psnr: 66.4320\nEpoch 8/100\n243/243 [==============================] - 431s 2s/step - loss: 0.1069 - psnr: 67.3737 - val_loss: 0.1094 - val_psnr: 66.4070\nEpoch 9/100\n243/243 [==============================] - 431s 2s/step - loss: 0.1044 - psnr: 67.4384 - val_loss: 0.1210 - val_psnr: 65.9124\nEpoch 10/100\n243/243 [==============================] - 432s 2s/step - loss: 0.1080 - psnr: 67.3665 - val_loss: 0.1115 - val_psnr: 66.9443\nEpoch 11/100\n243/243 [==============================] - 438s 2s/step - loss: 0.1041 - psnr: 67.5550 - val_loss: 0.1022 - val_psnr: 67.0890\nEpoch 12/100\n243/243 [==============================] - 432s 2s/step - loss: 0.1043 - psnr: 67.4996 - val_loss: 0.1067 - val_psnr: 67.0900\nEpoch 13/100\n243/243 [==============================] - 438s 2s/step - loss: 0.1059 - psnr: 67.4166 - val_loss: 0.0977 - val_psnr: 67.6418\nEpoch 14/100\n243/243 [==============================] - 438s 2s/step - loss: 0.0989 - psnr: 67.9418 - val_loss: 0.0963 - val_psnr: 68.1703\nEpoch 15/100\n243/243 [==============================] - 438s 2s/step - loss: 0.1015 - psnr: 67.7641 - val_loss: 0.0939 - val_psnr: 68.4126\nEpoch 16/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0982 - psnr: 67.9785 - val_loss: 0.0951 - val_psnr: 68.0276\nEpoch 17/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0981 - psnr: 68.0821 - val_loss: 0.1169 - val_psnr: 
66.7316\nEpoch 18/100\n243/243 [==============================] - 430s 2s/step - loss: 0.1004 - psnr: 67.8817 - val_loss: 0.1016 - val_psnr: 67.3338\nEpoch 19/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0959 - psnr: 68.2082 - val_loss: 0.0995 - val_psnr: 67.4730\nEpoch 20/100\n243/243 [==============================] - ETA: 0s - loss: 0.1026 - psnr: 67.7257\nEpoch 00020: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.\n243/243 [==============================] - 430s 2s/step - loss: 0.1026 - psnr: 67.7257 - val_loss: 0.1003 - val_psnr: 67.3352\nEpoch 21/100\n243/243 [==============================] - 436s 2s/step - loss: 0.0919 - psnr: 68.6314 - val_loss: 0.0826 - val_psnr: 69.3167\nEpoch 22/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0936 - psnr: 68.3714 - val_loss: 0.1035 - val_psnr: 67.5037\nEpoch 23/100\n243/243 [==============================] - 432s 2s/step - loss: 0.0896 - psnr: 68.7174 - val_loss: 0.0862 - val_psnr: 69.1280\nEpoch 24/100\n243/243 [==============================] - 435s 2s/step - loss: 0.0917 - psnr: 68.6044 - val_loss: 0.0796 - val_psnr: 69.3943\nEpoch 25/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0895 - psnr: 68.7495 - val_loss: 0.0898 - val_psnr: 68.8661\nEpoch 26/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0890 - psnr: 68.8537 - val_loss: 0.0983 - val_psnr: 68.0671\nEpoch 27/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0920 - psnr: 68.5267 - val_loss: 0.0975 - val_psnr: 67.9822\nEpoch 28/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0897 - psnr: 68.7615 - val_loss: 0.1008 - val_psnr: 67.6455\nEpoch 29/100\n243/243 [==============================] - ETA: 0s - loss: 0.0864 - psnr: 69.0921\nEpoch 00029: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-05.\n243/243 [==============================] - 430s 2s/step - loss: 0.0864 - psnr: 69.0921 - val_loss: 0.0940 - val_psnr: 68.5634\nEpoch 30/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0868 - psnr: 69.0637 - val_loss: 0.0917 - val_psnr: 68.4715\nEpoch 31/100\n243/243 [==============================] - 436s 2s/step - loss: 0.0845 - psnr: 69.2952 - val_loss: 0.0791 - val_psnr: 69.2714\nEpoch 32/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0867 - psnr: 69.1577 - val_loss: 0.0809 - val_psnr: 69.5186\nEpoch 33/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0860 - psnr: 69.3142 - val_loss: 0.0868 - val_psnr: 68.9742\nEpoch 34/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0858 - psnr: 69.1895 - val_loss: 0.0890 - val_psnr: 68.3421\nEpoch 35/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0860 - psnr: 69.1920 - val_loss: 0.0791 - val_psnr: 69.2999\nEpoch 36/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0867 - psnr: 69.2137 - val_loss: 0.0832 - val_psnr: 68.8973\nEpoch 37/100\n243/243 [==============================] - 436s 2s/step - loss: 0.0853 - psnr: 69.2414 - val_loss: 0.0716 - val_psnr: 70.2069\nEpoch 38/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0858 - psnr: 69.2487 - val_loss: 0.0895 - val_psnr: 68.9241\nEpoch 39/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0856 - psnr: 69.2450 - val_loss: 0.0896 - val_psnr: 68.6078\nEpoch 40/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0840 - psnr: 69.2957 - val_loss: 
0.0752 - val_psnr: 69.8205\nEpoch 41/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0843 - psnr: 69.3408 - val_loss: 0.0924 - val_psnr: 68.4360\nEpoch 42/100\n243/243 [==============================] - ETA: 0s - loss: 0.0834 - psnr: 69.4409\nEpoch 00042: ReduceLROnPlateau reducing learning rate to 1.249999968422344e-05.\n243/243 [==============================] - 430s 2s/step - loss: 0.0834 - psnr: 69.4409 - val_loss: 0.0966 - val_psnr: 67.8450\nEpoch 43/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0803 - psnr: 69.8295 - val_loss: 0.0844 - val_psnr: 69.0862\nEpoch 44/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0801 - psnr: 69.7533 - val_loss: 0.0876 - val_psnr: 68.7879\nEpoch 45/100\n243/243 [==============================] - 431s 2s/step - loss: 0.0826 - psnr: 69.6043 - val_loss: 0.0934 - val_psnr: 68.2593\nEpoch 46/100\n243/243 [==============================] - 430s 2s/step - loss: 0.0789 - psnr: 69.9293 - val_loss: 0.0947 - val_psnr: 68.2925\nEpoch 47/100\n243/243 [==============================] - ETA: 0s - loss: 0.0803 - psnr: 69.7273\nEpoch 00047: ReduceLROnPlateau reducing learning rate to 6.24999984211172e-06.\n243/243 [==============================] - 429s 2s/step - loss: 0.0803 - psnr: 69.7273 - val_loss: 0.0935 - val_psnr: 68.1041\n" ], [ "from glob import glob\nfrom google.colab import files\n\n\nfor file in glob('/content/MIRNet/checkpoints/*'):\n files.download(file)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d057006db359e98fd2bcb678d2170101342a799e
1,939
ipynb
Jupyter Notebook
src/Notebooks/ObjectScript.ipynb
gjsjohnmurray/iris-python-template
49febf4438c58cbeccc64e26c96f07f68f2c5e9e
[ "MIT" ]
null
null
null
src/Notebooks/ObjectScript.ipynb
gjsjohnmurray/iris-python-template
49febf4438c58cbeccc64e26c96f07f68f2c5e9e
[ "MIT" ]
6
2022-02-13T17:22:54.000Z
2022-03-25T11:23:06.000Z
src/Notebooks/ObjectScript.ipynb
gjsjohnmurray/iris-python-template
49febf4438c58cbeccc64e26c96f07f68f2c5e9e
[ "MIT" ]
3
2022-02-15T02:20:18.000Z
2022-03-23T03:25:38.000Z
22.287356
232
0.553894
[ [ [ "# Using ObjectScript in a notebook\nThis notebook uses a kernel written in Python, which plugs into Jupyter to enable execution of ObjectScript inside IRIS. See `misc/kernels/objectscript/*` and `src/ObjectScript/Kernel/CodeExecutor.cls` for how this is done.\n\nIndenting each line with at least one space allows InterSystems Language Server to recognize the ObjectScript INT code correctly.", "_____no_output_____" ] ], [ [ " Set hello = \"helloworld2\"\n zw hello", "hello=\"helloworld2\"\r\n" ] ], [ [ "# Embedded Python in ObjectScript\nFrom ObjectScript, run some Python library methods.", "_____no_output_____" ] ], [ [ " set datetime = ##class(%SYS.Python).Import(\"datetime\")\n zw datetime\n zw datetime.date.today().isoformat()", "datetime=3@%SYS.Python ; <module 'datetime' from '/usr/lib/python3.8/datetime.py'> ; <OREF>\r\n\"2021-12-12\"\r\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0572ca4db06ae108243bedfab9b5fffac5d18fe
9,154
ipynb
Jupyter Notebook
qubiter/jupyter_notebooks/examples_of_placeholder_usage.ipynb
yourball/qubiter
5ef0ea064fa8c9f125f7951a01fbb88504a054a5
[ "Apache-2.0" ]
3
2019-10-03T04:27:36.000Z
2021-02-13T17:49:34.000Z
qubiter/jupyter_notebooks/examples_of_placeholder_usage.ipynb
yourball/qubiter
5ef0ea064fa8c9f125f7951a01fbb88504a054a5
[ "Apache-2.0" ]
null
null
null
qubiter/jupyter_notebooks/examples_of_placeholder_usage.ipynb
yourball/qubiter
5ef0ea064fa8c9f125f7951a01fbb88504a054a5
[ "Apache-2.0" ]
2
2020-10-07T15:22:19.000Z
2021-06-07T04:59:58.000Z
31.030508
764
0.594385
[ [ [ "# Examples of usage of Gate Angle Placeholder\n\nThe word \"Placeholder\" is used in Qubiter (we are in good company, Tensorflow uses this word in the same way) to mean a variable for which we delay/postpone assigning a numerical value (evaluating it) until a later time. In the case of Qubiter, it is useful to define gates with placeholders standing for angles. One can postpone evaluating those placeholders until one is ready to call the circuit simulator, and then pass the values of the placeholders as an argument to the simulator’s constructor. Placeholders of this type can be useful, for example, with quantum neural nets (QNNs). In some QNN algorithms, the circuit gate structure is fixed but the angles of the gates are varied many times, gradually, trying to lower a cost function each time.\n\n> In Qubiter, legal variable names must be of form `#3` or `-#3` or `#3*.5` or\n`-#3*.5` where 3 can be replaced by any non-negative int, and .5 can\nbe replaced by anything that can be an argument of float() without\nthrowing an exception. In this example, the 3 that follows the hash\ncharacter is called the variable number\n\n>NEW! (functional placeholder variables)\nNow legal variable names can ALSO be of the form `my_fun#1#2` or\n`-my_fun#1#2`, where\n* the 1 and 2 can be replaced by any non-negative integers and there\nmight be any number > 0 of hash variables. Thus, there need not\nalways be precisely 2 hash variables as in the example.\n* `my_fun` can be replaced by the name of any function with one or\nmore input floats (2 inputs in the example), as long as the first\ncharacter of the function's name is a lower case letter.\n\n>The strings `my_fun#1#2` or `-my_fun#1#2` indicate than one wants to\nuse for the angle being replaced, the values of `my_fun(#1, #2)` or\n`-my_fun(#1, #2)`, respectively, where the inputs #1 and #2 are\nfloats standing for radians and the output is also a float standing\nfor radians.\n\n", "_____no_output_____" ] ], [ [ "import os\nimport sys\nprint(os.getcwd())\nos.chdir('../../')\nprint(os.getcwd())\nsys.path.insert(0,os.getcwd())", "C:\\Users\\rrtuc\\Desktop\\backedup\\python-projects\\qubiter\\qubiter\\jupyter-notebooks\nC:\\Users\\rrtuc\\Desktop\\backedup\\python-projects\\qubiter\n" ] ], [ [ "We begin by writing a simple circuit with 4 qubits. As usual, the following code will\nwrite an English and a Picture file in the `io_folder` directory. Note that some\nangles have been entered into the write() Python functions as legal\nvariable names instead of floats. In the English file, you will see those legal\nnames where the numerical values of those angles would have been.", "_____no_output_____" ] ], [ [ "from qubiter.SEO_writer import *\nfrom qubiter.SEO_reader import *\nfrom qubiter.EchoingSEO_reader import *\nfrom qubiter.SEO_simulator import *", "loaded OneBitGates, WITHOUT autograd.numpy\n" ], [ "num_bits = 4\nfile_prefix = 'placeholder_test'\nemb = CktEmbedder(num_bits, num_bits)\nwr = SEO_writer(file_prefix, emb)\nwr.write_Rx(2, rads=np.pi/7)\nwr.write_Rx(1, rads='#2*.5')\nwr.write_Rx(1, rads='my_fun1#2')\nwr.write_Rn(3, rads_list=['#1', '-#1*3', '#3'])\nwr.write_Rx(1, rads='-my_fun2#2#1')\nwr.write_cnot(2, 3)\nwr.close_files()", "_____no_output_____" ] ], [ [ "The following 2 files were just written:\n1. <a href='../io_folder/placeholder_test_4_eng.txt'>../io_folder/placeholder_test_4_eng.txt</a>\n2. 
<a href='../io_folder/placeholder_test_4_ZLpic.txt'>../io_folder/placeholder_test_4_ZLpic.txt</a>", "_____no_output_____" ], [ "Simply by creating an object of the class SEO_reader with the flag `write_log` set equal to True, you can create a log file which contains \n\n* a list of distinct variable numbers \n* a list of distinct function names\n\nencountered in the English file", "_____no_output_____" ] ], [ [ "rdr = SEO_reader(file_prefix, num_bits, write_log=True)", "_____no_output_____" ] ], [ [ "The following log file was just written:\n \n<a href='../io_folder/placeholder_test_4_log.txt'>../io_folder/placeholder_test_4_log.txt</a>", "_____no_output_____" ], [ "Next, let us create two functions that will be used for the functional placeholders", "_____no_output_____" ] ], [ [ "def my_fun1(x):\n return x*.5\n\ndef my_fun2(x, y):\n return x + y", "_____no_output_____" ] ], [ [ "**Partial Substitution**\n\nThis creates new files\n\nwith `#1=30`, `#2=60`, `'my_fun1'->my_fun1`,\n\nbut `#3` and `'my_fun2'` still undecided", "_____no_output_____" ] ], [ [ "vman = PlaceholderManager(eval_all_vars=False,\n var_num_to_rads={1: np.pi/6, 2: np.pi/3},\n fun_name_to_fun={'my_fun1': my_fun1})\nwr = SEO_writer(file_prefix + '_eval01', emb)\nEchoingSEO_reader(file_prefix, num_bits, wr,\n vars_manager=vman)", "_____no_output_____" ] ], [ [ "The following 2 files were just written:\n1. <a href='../io_folder/placeholder_test_eval01_4_eng.txt'>../io_folder/placeholder_test_eval01_4_eng.txt</a>\n2. <a href='../io_folder/placeholder_test_eval01_4_ZLpic.txt'>../io_folder/placeholder_test_eval01_4_ZLpic.txt</a>", "_____no_output_____" ], [ "The following code runs the simulator after substituting\n\n`#1=30`, `#2=60`, `#3=90`, `'my_fun1'->my_fun1`, `'my_fun2'->my_fun2`", "_____no_output_____" ] ], [ [ "vman = PlaceholderManager(\n var_num_to_rads={1: np.pi/6, 2: np.pi/3, 3: np.pi/2},\n fun_name_to_fun={'my_fun1': my_fun1, 'my_fun2': my_fun2}\n)\nsim = SEO_simulator(file_prefix, num_bits, verbose=False,\n vars_manager=vman)\nStateVec.describe_st_vec_dict(sim.cur_st_vec_dict)", "*********branch= pure\ntotal probability of state vector (=one if no measurements)= 1.0000000000000004\ndictionary with key=qubit, value=(Prob(0), Prob(1))\n{0: (1.0000000000000004, -4.440892098500626e-16),\n 1: (0.7500000000000002, 0.24999999999999978),\n 2: (0.811744900929367, 0.18825509907063298),\n 3: (0.6235127414399703, 0.37648725856002974)}\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0572ea4e0be15f1f9e634f4b37f7c10652d0dff
242,893
ipynb
Jupyter Notebook
docs/examples/the-art-of-using-pipelines.ipynb
dataJSA/river
93497bba53d11d21e862acfd656b3fba7cf05c9b
[ "BSD-3-Clause" ]
2,184
2020-11-11T12:31:12.000Z
2022-03-31T16:45:41.000Z
docs/examples/the-art-of-using-pipelines.ipynb
dataJSA/river
93497bba53d11d21e862acfd656b3fba7cf05c9b
[ "BSD-3-Clause" ]
262
2020-11-11T17:15:47.000Z
2022-03-31T23:54:03.000Z
docs/examples/the-art-of-using-pipelines.ipynb
dataJSA/river
93497bba53d11d21e862acfd656b3fba7cf05c9b
[ "BSD-3-Clause" ]
240
2020-11-11T14:25:03.000Z
2022-03-31T08:25:50.000Z
75.809301
770
0.461281
[ [ [ "# The art of using pipelines", "_____no_output_____" ], [ "Pipelines are a natural way to think about a machine learning system. Indeed with some practice a data scientist can visualise data \"flowing\" through a series of steps. The input is typically some raw data which has to be processed in some manner. The goal is to represent the data in such a way that is can be ingested by a machine learning algorithm. Along the way some steps will extract features, while others will normalize the data and remove undesirable elements. Pipelines are simple, and yet they are a powerful way of designing sophisticated machine learning systems.\n\nBoth [scikit-learn](https://stackoverflow.com/questions/33091376/python-what-is-exactly-sklearn-pipeline-pipeline) and [pandas](https://tomaugspurger.github.io/method-chaining) make it possible to use pipelines. However it's quite rare to see pipelines being used in practice (at least on Kaggle). Sometimes you get to see people using scikit-learn's `pipeline` module, however the `pipe` method from `pandas` is sadly underappreciated. A big reason why pipelines are not given much love is that it's easier to think of batch learning in terms of a script or a notebook. Indeed many people doing data science seem to prefer a procedural style to a declarative style. Moreover in practice pipelines can be a bit rigid if one wishes to do non-orthodox operations.\n\nAlthough pipelines may be a bit of an odd fit for batch learning, they make complete sense when they are used for online learning. Indeed the UNIX philosophy has advocated the use of pipelines for data processing for many decades. If you can visualise data as a stream of observations then using pipelines should make a lot of sense to you. We'll attempt to convince you by writing a machine learning algorithm in a procedural way and then converting it to a declarative pipeline in small steps. Hopefully by the end you'll be convinced, or not!\n\nIn this notebook we'll manipulate data from the [Kaggle Recruit Restaurants Visitor Forecasting competition](https://www.kaggle.com/c/recruit-restaurant-visitor-forecasting). The data is directly available through `river`'s `datasets` module.", "_____no_output_____" ] ], [ [ "from pprint import pprint\nfrom river import datasets\n\nfor x, y in datasets.Restaurants():\n pprint(x)\n pprint(y)\n break", "{'area_name': 'Tōkyō-to Nerima-ku Toyotamakita',\n 'date': datetime.datetime(2016, 1, 1, 0, 0),\n 'genre_name': 'Izakaya',\n 'is_holiday': True,\n 'latitude': 35.7356234,\n 'longitude': 139.6516577,\n 'store_id': 'air_04341b588bde96cd'}\n10\n" ] ], [ [ "We'll start by building and running a model using a procedural coding style. 
The performance of the model doesn't matter, we're simply interested in the design of the model.", "_____no_output_____" ] ], [ [ "from river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\n\nmeans = (\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))\n)\n\nscaler = preprocessing.StandardScaler()\nlin_reg = linear_model.LinearRegression()\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n \n # Derive date features\n x['weekday'] = x['date'].weekday()\n x['is_weekend'] = x['date'].weekday() in (5, 6)\n \n # Process the rolling means of the target \n for mean in means:\n x = {**x, **mean.transform_one(x)}\n mean.learn_one(x, y)\n \n # Remove the key/value pairs that aren't features\n for key in ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']:\n x.pop(key)\n \n # Rescale the data\n x = scaler.learn_one(x).transform_one(x)\n \n # Fit the linear regression\n y_pred = lin_reg.predict_one(x)\n lin_reg.learn_one(x, y)\n \n # Update the metric using the out-of-fold prediction\n metric.update(y, y_pred)\n \nprint(metric)", "MAE: 8.465114\n" ] ], [ [ "We're not using many features. We can print the last `x` to get an idea of the features (don't forget they've been scaled!)", "_____no_output_____" ] ], [ [ "pprint(x)", "{'is_holiday': -0.23103573677646685,\n 'is_weekend': 1.6249280076334165,\n 'weekday': 1.0292832579142892,\n 'y_rollingmean_14_by_store_id': -1.4125913815779154,\n 'y_rollingmean_21_by_store_id': -1.3980979075298519,\n 'y_rollingmean_7_by_store_id': -1.3502314499809096}\n" ] ], [ [ "The above chunk of code is quite explicit but it's a bit verbose. The whole point of libraries such as `river` is to make life easier for users. Moreover there's too much space for users to mess up the order in which things are done, which increases the chance of there being target leakage. We'll now rewrite our model in a declarative fashion using a pipeline *à la sklearn*. ", "_____no_output_____" ] ], [ [ "from river import compose\n\n\ndef get_date_features(x):\n weekday = x['date'].weekday()\n return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}\n\n\nmodel = compose.Pipeline(\n ('features', compose.TransformerUnion(\n ('date_features', compose.FuncTransformer(get_date_features)),\n ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),\n ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),\n ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))\n )),\n ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n ('scale', preprocessing.StandardScaler()),\n ('lin_reg', linear_model.LinearRegression())\n)\n\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n \n # Make a prediction without using the target\n y_pred = model.predict_one(x)\n \n # Update the model using the target\n model.learn_one(x, y)\n \n # Update the metric using the out-of-fold prediction\n metric.update(y, y_pred)\n \nprint(metric)", "MAE: 8.38533\n" ] ], [ [ "We use a `Pipeline` to arrange each step in a sequential order. A `TransformerUnion` is used to merge multiple feature extractors into a single transformer. 
The `for` loop is now much shorter and is thus easier to grok: we get the out-of-fold prediction, we fit the model, and finally we update the metric. This way of evaluating a model is typical of online learning, and so we put it wrapped it inside a function called `progressive_val_score` part of the `evaluate` module. We can use it to replace the `for` loop.", "_____no_output_____" ] ], [ [ "from river import evaluate\n\nmodel = compose.Pipeline(\n ('features', compose.TransformerUnion(\n ('date_features', compose.FuncTransformer(get_date_features)),\n ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),\n ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),\n ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))\n )),\n ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n ('scale', preprocessing.StandardScaler()),\n ('lin_reg', linear_model.LinearRegression())\n)\n\nevaluate.progressive_val_score(dataset=datasets.Restaurants(), model=model, metric=metrics.MAE())", "_____no_output_____" ] ], [ [ "Notice that you couldn't have used the `progressive_val_score` method if you wrote the model in a procedural manner.\n\nOur code is getting shorter, but it's still a bit difficult on the eyes. Indeed there is a lot of boilerplate code associated with pipelines that can get tedious to write. However `river` has some special tricks up it's sleeve to save you from a lot of pain.\n\nThe first trick is that the name of each step in the pipeline can be omitted. If no name is given for a step then `river` automatically infers one.", "_____no_output_____" ] ], [ [ "model = compose.Pipeline(\n compose.TransformerUnion(\n compose.FuncTransformer(get_date_features),\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))\n ),\n compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n preprocessing.StandardScaler(),\n linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())", "_____no_output_____" ] ], [ [ "Under the hood a `Pipeline` inherits from `collections.OrderedDict`. Indeed this makes sense because if you think about it a `Pipeline` is simply a sequence of steps where each step has a name. The reason we mention this is because it means you can manipulate a `Pipeline` the same way you would manipulate an ordinary `dict`. For instance we can print the name of each step by using the `keys` method.", "_____no_output_____" ] ], [ [ "for name in model.steps:\n print(name)", "TransformerUnion\nDiscard\nStandardScaler\nLinearRegression\n" ] ], [ [ "The first step is a `FeatureUnion` and it's string representation contains the string representation of each of it's elements. Not having to write names saves up some time and space and is certainly less tedious.\n\nThe next trick is that we can use mathematical operators to compose our pipeline. For example we can use the `+` operator to merge `Transformer`s into a `TransformerUnion`. 
", "_____no_output_____" ] ], [ [ "model = compose.Pipeline(\n compose.FuncTransformer(get_date_features) + \\\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) + \\\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) + \\\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)),\n\n compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n preprocessing.StandardScaler(),\n linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())", "_____no_output_____" ] ], [ [ "Likewhise we can use the `|` operator to assemble steps into a `Pipeline`. ", "_____no_output_____" ] ], [ [ "model = (\n compose.FuncTransformer(get_date_features) +\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) +\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) +\n feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))\n)\n\nto_discard = ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']\n\nmodel = model | compose.Discard(*to_discard) | preprocessing.StandardScaler()\n\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())", "_____no_output_____" ] ], [ [ "Hopefully you'll agree that this is a powerful way to express machine learning pipelines. For some people this should be quite remeniscent of the UNIX pipe operator. One final trick we want to mention is that functions are automatically wrapped with a `FuncTransformer`, which can be quite handy.", "_____no_output_____" ] ], [ [ "model = get_date_features\n\nfor n in [7, 14, 21]:\n model += feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(n))\n\nmodel |= compose.Discard(*to_discard)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())", "_____no_output_____" ] ], [ [ "Naturally some may prefer the procedural style we first used because they find it easier to work with. It all depends on your style and you should use what you feel comfortable with. However we encourage you to use operators because we believe that this will increase the readability of your code, which is very important. To each their own!\n\nBefore finishing we can take an interactive look at our pipeline.", "_____no_output_____" ] ], [ [ "model", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d05742a6f84547af3ed9abaab0c0e1b7d8ae135b
761,898
ipynb
Jupyter Notebook
climate_starter.ipynb
ahchambers/sqlalchemy-challenge
7f83a7ba92bfb01b91f92b49c3fef26607100f41
[ "MIT" ]
null
null
null
climate_starter.ipynb
ahchambers/sqlalchemy-challenge
7f83a7ba92bfb01b91f92b49c3fef26607100f41
[ "MIT" ]
null
null
null
climate_starter.ipynb
ahchambers/sqlalchemy-challenge
7f83a7ba92bfb01b91f92b49c3fef26607100f41
[ "MIT" ]
null
null
null
2,020.949602
672,348
0.613404
[ [ [ "%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ] ], [ [ "# Reflect Tables into SQLAlchemy ORM", "_____no_output_____" ] ], [ [ "# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func", "_____no_output_____" ], [ "engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")", "_____no_output_____" ], [ " # reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)", "_____no_output_____" ], [ "# We can view all of the classes that automap found\nBase.classes.keys()", "_____no_output_____" ], [ "# Save references to each table\nmeasurements = Base.classes.measurement\nstations = Base.classes.station", "_____no_output_____" ], [ "# Create our session (link) from Python to the DB\nsession = Session(engine)", "_____no_output_____" ] ], [ [ "# Exploratory Climate Analysis", "_____no_output_____" ] ], [ [ "# Design a query to retrieve the last 12 months of precipitation data and plot the results\n\n# Calculate the date 1 year ago from the last data point in the database\nlast_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n\n# Perform a query to retrieve the data and precipitation scores\nresults = session.query(measurements.date, measurements.prcp).filter(measurements.date >= last_year).all()\n\n# Save the query results as a Pandas DataFrame and set the index to the date column\ndata = pd.DataFrame(results, columns=['date', 'precipitation'])\n\n# Sort the dataframe by date\ndata = data.sort_values(\"date\")\n\n# Use Pandas Plotting with Matplotlib to plot the data\nx_axis=data[\"date\"]\ny_axis=data[\"precipitation\"]\nplt.scatter(x_axis, y_axis, marker=\"o\", facecolors=\"red\", edgecolors=\"black\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Measurement\")", "_____no_output_____" ], [ "# Use Pandas to calcualte the summary statistics for the precipitation data\ndata.describe()", "_____no_output_____" ], [ "# Design a query to show how many stations are available in this dataset?\nsession.query(func.count(stations.station)).all()", "_____no_output_____" ], [ "# What are the most active stations? (i.e. 
what stations have the most rows)?\n# List the stations and the counts in descending order.\nsession.query(measurements.station, func.count(1)).\\\n group_by(measurements.station).\\\n order_by(func.count(1).desc()).all()", "_____no_output_____" ], [ "# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\nsel = [measurements.station,\n func.min(measurements.tobs),\n func.max(measurements.tobs),\n func.avg(measurements.tobs)]\n\nsession.query(*sel).\\\n filter(measurements.station == \"USC00519281\").all()", "_____no_output_____" ], [ "# Choose the station with the highest number of temperature observations.\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\nprecipitation_df = pd.DataFrame(session.query(measurements.date, measurements.tobs).\\\n filter(measurements.date > last_year).\\\n filter(measurements.station == \"USC00519281\").\\\n order_by(measurements.date).all(), columns = [\"Date\", \"temperature\"])\n\n# plot the results as a histogram\nprecipitation_df.plot(kind = \"hist\", bins = 12)\nplt.xlabel(\"Temperature\")\nplt.ylabel(\"Frequency\")\nplt.savefig(\"output/fig1.png\");", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d057485a4d33cfddd04a8984392c8bde3fd70897
239,888
ipynb
Jupyter Notebook
examples/Photometry_demo.ipynb
ke-fang/3ML
5f3208d878c8c3bd712c8db618b426138baceaa1
[ "BSD-3-Clause" ]
1
2021-01-26T14:21:26.000Z
2021-01-26T14:21:26.000Z
examples/Photometry_demo.ipynb
ke-fang/3ML
5f3208d878c8c3bd712c8db618b426138baceaa1
[ "BSD-3-Clause" ]
null
null
null
examples/Photometry_demo.ipynb
ke-fang/3ML
5f3208d878c8c3bd712c8db618b426138baceaa1
[ "BSD-3-Clause" ]
null
null
null
319.850667
39,720
0.919075
[ [ [ "# Photometric Plugin\n\nFor optical photometry, we provide the **PhotometryLike** plugin that handles forward folding of a spectral model through filter curves. Let's have a look at the avaiable procedures.\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nfrom threeML import *\n\n# we will need XPSEC models for extinction\nfrom astromodels.xspec import *\n\n# The filter library takes a while to load so you must import it explicitly..\nfrom threeML.plugins.photometry.filter_library import threeML_filter_library", "_____no_output_____" ] ], [ [ "## Setup\n\nWe use [speclite](http://speclite.readthedocs.io/en/latest/ ) to handle optical filters.\nTherefore, you can easily build your own custom filters, use the built in speclite filters, or use the 3ML filter library that we have built thanks to [Spanish Virtual Observatory](http://svo.cab.inta-csic.es/main/index.php). \n\n**If you use these filters, please be sure to cite the proper sources!**\n\n### Simple example of building a filter\nLet's say we have our own 1-m telescope with a Johnson filter and we happen to record the data. We also have simultaneous data at other wavelengths and we want to compare. Let's setup the optical plugin (we'll ignore the other data for now).\n\n", "_____no_output_____" ] ], [ [ "import speclite.filters as spec_filters\n\nmy_backyard_telescope_filter = spec_filters.load_filter('bessell-r')\n\n# NOTE:\nmy_backyard_telescope_filter.name", "_____no_output_____" ] ], [ [ "NOTE: the filter name is 'bessell-R'. The plugin will look for the name *after* the **'-'** i.e 'R'\n\n\nNow let's build a 3ML plugin via **PhotometryLike**. \n\nOur data are entered as keywords with the name of the filter as the keyword and the data in an magnitude,error tuple, i.e. R=(mag,mag_err):", "_____no_output_____" ] ], [ [ "my_backyard_telescope = PhotometryLike('backyard_astronomy',\n filters=my_backyard_telescope_filter, # the filter\n R=(20,.1) ) # the magnitude and error\n\nmy_backyard_telescope.display_filters()", "Using Gaussian statistic (equivalent to chi^2) with the provided errors.\n" ] ], [ [ "## 3ML filter library\nExplore the filter library. If you cannot find what you need, it is simple to add your own\n", "_____no_output_____" ] ], [ [ "threeML_filter_library.SLOAN", "_____no_output_____" ], [ "spec_filters.plot_filters(threeML_filter_library.SLOAN.SDSS)", "_____no_output_____" ], [ "spec_filters.plot_filters(threeML_filter_library.Herschel.SPIRE)", "_____no_output_____" ], [ "spec_filters.plot_filters(threeML_filter_library.Keck.NIRC2)", "_____no_output_____" ] ], [ [ "## Build your own filters\n\nFollowing the example from speclite, we can build our own filters and add them:", "_____no_output_____" ] ], [ [ "fangs_g = spec_filters.FilterResponse(\n wavelength = [3800, 4500, 5200] * u.Angstrom,\n response = [0, 0.5, 0], meta=dict(group_name='fangs', band_name='g'))\nfangs_r = spec_filters.FilterResponse(\n wavelength = [4800, 5500, 6200] * u.Angstrom,\n response = [0, 0.5, 0], meta=dict(group_name='fangs', band_name='r'))\n\nfangs = spec_filters.load_filters('fangs-g', 'fangs-r')\n\nfangslike = PhotometryLike('fangs',filters=fangs,g=(20,.1),r=(18,.1))\n\n\nfangslike.display_filters()", "Using Gaussian statistic (equivalent to chi^2) with the provided errors.\n" ] ], [ [ "## GROND Example\n\nNow we will look at GROND. 
We get the filter from the 3ML filter library.\n\n(Just play with tab completion to see what is available!)\n\n", "_____no_output_____" ] ], [ [ "grond = PhotometryLike('GROND',\n filters=threeML_filter_library.ESO.GROND,\n #g=(21.5.93,.23), # we exclude these filters\n #r=(22.,0.12),\n i=(21.8,.01),\n z=(21.2,.01),\n J=(19.6,.01),\n H=(18.6,.01),\n K=(18.,.01))", "Using Gaussian statistic (equivalent to chi^2) with the provided errors.\n" ], [ "grond.display_filters()", "_____no_output_____" ] ], [ [ "### Model specification\n\nHere we use XSPEC's dust extinction models for the milky way and the host ", "_____no_output_____" ] ], [ [ "spec = Powerlaw() * XS_zdust() * XS_zdust()\n\ndata_list = DataList(grond)\n\nmodel = Model(PointSource('grb',0,0,spectral_shape=spec))\n\nspec.piv_1 = 1E-2\nspec.index_1.fix=False\nspec.redshift_2 = 0.347\nspec.redshift_2.fix = True\n\nspec.e_bmv_2 = 5./2.93\nspec.e_bmv_2.fix = True\nspec.rv_2 = 2.93\nspec.rv_2.fix = True\n\n\nspec.method_2 = 3\nspec.method_2.fix=True\n\n\n\nspec.e_bmv_3 = .002/3.08\nspec.e_bmv_3.fix = True\nspec.rv_3= 3.08\nspec.rv_3.fix=True\nspec.redshift_3 = 0\nspec.redshift_3.fix=True\nspec.method_3 = 1\nspec.method_3.fix=True\n\njl = JointLikelihood(model,data_list)\n", "_____no_output_____" ] ], [ [ "We compute $m_{\\rm AB}$ from astromodels photon fluxes. This is done by convolving the differential flux over the filter response:\n\n$ F[R,f_\\lambda] \\equiv \\int_0^\\infty \\frac{dg}{d\\lambda}(\\lambda)R(\\lambda) \\omega(\\lambda) d\\lambda$\n\nwhere we have converted the astromodels functions to wavelength properly.", "_____no_output_____" ] ], [ [ "_ = jl.fit()", "Best fit values:\n\n" ] ], [ [ "We can now look at the fit in magnitude space or model space as with any plugin.\n", "_____no_output_____" ] ], [ [ "_=display_photometry_model_magnitudes(jl)", "_____no_output_____" ], [ "_ = plot_point_source_spectra(jl.results,flux_unit='erg/(cm2 s keV)',\n xscale='linear',\n energy_unit='nm',ene_min=1E3, ene_max=1E5, num_ene=200 )", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0574bda01901f9303b9448ca4d2c154d30fdc06
209,080
ipynb
Jupyter Notebook
PM2.5_Exercise_4.ipynb
Coslate/Python_Lab1
a7d59772b9af09eb2f443bdd811d5a4ab542d7ba
[ "MIT" ]
null
null
null
PM2.5_Exercise_4.ipynb
Coslate/Python_Lab1
a7d59772b9af09eb2f443bdd811d5a4ab542d7ba
[ "MIT" ]
null
null
null
PM2.5_Exercise_4.ipynb
Coslate/Python_Lab1
a7d59772b9af09eb2f443bdd811d5a4ab542d7ba
[ "MIT" ]
null
null
null
80.199463
51,822
0.701138
[ [ [ "import pandas as pd\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n%matplotlib notebook\n\npm25_data_gps_right = pm25_data[(pm25_data.gps_lon > 121.4) & (pm25_data.gps_lat > 24.5)]\npm25_temperature_humidity_data_group = pm25_data_gps_right.groupby(['s_h0','s_t0'])[['s_d0', 's_d1']]\npm25_temperature_humidity_data_mean = pm25_temperature_humidity_data_group.mean()\ngroup_keys = pm25_temperature_humidity_data_group.groups.keys()\n\ntemp = []\nhum = []\nfor items in group_keys : \n (temp_val, hum_val) = items\n temp.append(temp_val)\n hum.append(hum_val)\n\n#print(temp)\n#print(hum)\n#print(\"type = {x}\".format(x = type(pm25_temperature_humidity_data_mean)))\n\nplt.figure(1)\nthreedee = plt.figure().gca(projection='3d')\nthreedee.scatter(temp, hum, pm25_temperature_humidity_data_mean['s_d0'], c = 'r')\n\nthreedee.set_xlabel('s_h0')\nthreedee.set_ylabel('s_t0')\nthreedee.set_zlabel('s_d0')\nplt.show()\n\nmax_sd0 = max(pm25_temperature_humidity_data_mean['s_d0'])\nmin_sd0 = min(pm25_temperature_humidity_data_mean['s_d0'])\nprint(\"max_sd0 = {x}\".format(x = max_sd0))\nprint(\"min_sd0 = {x}\".format(x = min_sd0))\nprint(pm25_temperature_humidity_data_mean['s_d0'])\n\nplt.figure(2)\nthreedee = plt.figure().gca(projection='3d')\nthreedee.scatter(temp, hum, pm25_temperature_humidity_data_mean['s_d1'], c = 'b')\n\nthreedee.set_xlabel('s_h0')\nthreedee.set_ylabel('s_t0')\nthreedee.set_zlabel('s_d1')\nplt.show()\n\nmax_sd1 = max(pm25_temperature_humidity_data_mean['s_d1'])\nmin_sd1 = min(pm25_temperature_humidity_data_mean['s_d1'])\nprint(\"max_sd1 = {x}\".format(x = max_sd1))\nprint(\"min_sd1 = {x}\".format(x = min_sd1))\nprint(pm25_temperature_humidity_data_mean['s_d1'])\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d05762035855c378ed171a4364e0eca33ff5f0a9
12,045
ipynb
Jupyter Notebook
lite/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb
hawk-praxs/examples
78ca0fb8873ace7b71cfbee31dd34035dcab1020
[ "Apache-2.0" ]
3
2020-06-09T07:39:16.000Z
2020-09-14T20:13:32.000Z
lite/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb
hawk-praxs/examples
78ca0fb8873ace7b71cfbee31dd34035dcab1020
[ "Apache-2.0" ]
7
2020-11-13T18:56:38.000Z
2022-03-12T00:37:46.000Z
lite/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb
hawk-praxs/examples
78ca0fb8873ace7b71cfbee31dd34035dcab1020
[ "Apache-2.0" ]
null
null
null
33
216
0.489
[ [ [ "##### Copyright 2018 The TensorFlow Authors.\n", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Tensorflow Lite Gesture Classification Example Conversion Script\n\n\nThis guide shows how you can go about converting the model trained with TensorFlowJS to TensorFlow Lite FlatBuffers.\n\nRun all steps in-order. At the end, `model.tflite` file will be downloaded.\n", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/mobile/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/mobile/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "**Install Dependencies**", "_____no_output_____" ] ], [ [ "!pip3 install tensorflow==1.14.0 keras==2.2.4 tensorflowjs==0.6.4 --force-reinstall", "_____no_output_____" ], [ "import traceback\nimport logging\nimport tensorflow.compat.v1 as tf\nimport keras.backend as K\nimport os\n\nfrom google.colab import files\n\nfrom keras import Model, Input\nfrom keras.applications import MobileNet\nfrom keras.engine.saving import load_model\n\nfrom tensorflowjs.converters import load_keras_model\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)", "_____no_output_____" ] ], [ [ "***Cleanup any existing models if necessary***", "_____no_output_____" ] ], [ [ "!rm -rf *.h5 *.tflite *.json *.bin", "_____no_output_____" ] ], [ [ "**Upload your Tensorflow.js Artifacts Here**\n\ni.e., The weights manifest **model.json** and the binary weights file **model-weights.bin**", "_____no_output_____" ] ], [ [ "files.upload()", "_____no_output_____" ] ], [ [ "**Export Configuration**", "_____no_output_____" ] ], [ [ "#@title Export Configuration\n\n# TensorFlow.js arguments\n\nconfig_json = \"model.json\" #@param {type:\"string\"}\nweights_path_prefix = None #@param {type:\"raw\"}\nmodel_tflite = \"model.tflite\" #@param {type:\"string\"}\n", "_____no_output_____" ] ], [ [ "**Model Converter**\n\nThe following class converts a TensorFlow.js model to a TFLite FlatBuffer", "_____no_output_____" ] ], [ [ "class ModelConverter:\n \"\"\"\n Creates a ModelConverter class from a TensorFlow.js model file.\n\n Args:\n :param config_json_path: Full filepath of weights manifest file containing the model architecture.\n :param weights_path_prefix: Full filepath to the directory in which the weights binaries exist.\n :param tflite_model_file: Name of the TFLite FlatBuffer file to be exported.\n\n :return:\n ModelConverter class.\n \"\"\"\n\n def __init__(self,\n 
config_json_path,\n weights_path_prefix,\n tflite_model_file\n ):\n self.config_json_path = config_json_path\n self.weights_path_prefix = weights_path_prefix\n self.tflite_model_file = tflite_model_file\n self.keras_model_file = 'merged.h5'\n\n # MobileNet Options\n self.input_node_name = 'the_input'\n self.image_size = 224\n self.alpha = 0.25\n self.depth_multiplier = 1\n self._input_shape = (1, self.image_size, self.image_size, 3)\n self.depthwise_conv_layer = 'conv_pw_13_relu'\n\n def convert(self):\n self.save_keras_model()\n self._deserialize_tflite_from_keras()\n logger.info('The TFLite model has been generated')\n self._purge()\n\n def save_keras_model(self):\n top_model = load_keras_model(self.config_json_path, self.weights_path_prefix,\n weights_data_buffers=None,\n load_weights=True,\n use_unique_name_scope=True)\n\n base_model = self.get_base_model()\n merged_model = self.merge(base_model, top_model)\n merged_model.save(self.keras_model_file)\n\n logger.info(\"The merged Keras HDF5 model has been saved as {}\".format(self.keras_model_file))\n\n def merge(self, base_model, top_model):\n \"\"\"\n Merges base model with the classification block\n :return: Returns the merged Keras model\n \"\"\"\n logger.info(\"Initializing model...\")\n\n layer = base_model.get_layer(self.depthwise_conv_layer)\n model = Model(inputs=base_model.input, outputs=top_model(layer.output))\n logger.info(\"Model created.\")\n\n return model\n\n def get_base_model(self):\n \"\"\"\n Builds MobileNet with the default parameters\n :return: Returns the base MobileNet model\n \"\"\"\n input_tensor = Input(shape=self._input_shape[1:], name=self.input_node_name)\n base_model = MobileNet(input_shape=self._input_shape[1:],\n alpha=self.alpha,\n depth_multiplier=self.depth_multiplier,\n input_tensor=input_tensor,\n include_top=False)\n return base_model\n\n def _deserialize_tflite_from_keras(self):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(self.keras_model_file)\n tflite_model = converter.convert()\n\n with open(self.tflite_model_file, \"wb\") as file:\n file.write(tflite_model)\n\n def _purge(self):\n logger.info('Cleaning up Keras model')\n os.remove(self.keras_model_file)", "_____no_output_____" ], [ "try:\n K.clear_session()\n converter = ModelConverter(config_json,\n weights_path_prefix,\n model_tflite)\n\n converter.convert()\n\nexcept ValueError as e:\n print(traceback.format_exc())\n print(\"Error occurred while converting\")", "_____no_output_____" ], [ "files.download(model_tflite)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0576e60400757bdc79028780ee444a4e0cf2f51
133,641
ipynb
Jupyter Notebook
AAAI/Learnability/CIN/Linear/ds2/size_100/synthetic_type2_Linear_m_50.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
2
2019-08-24T07:20:35.000Z
2020-03-27T08:16:59.000Z
AAAI/Learnability/CIN/Linear/ds2/size_100/synthetic_type2_Linear_m_50.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
null
null
null
AAAI/Learnability/CIN/Linear/ds2/size_100/synthetic_type2_Linear_m_50.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
3
2019-06-21T09:34:32.000Z
2019-09-19T10:43:07.000Z
89.153436
20,070
0.765783
[ [ [ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\n%matplotlib inline\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nimport torchvision\n\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn import functional as F\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)", "cuda\n" ], [ "m = 50 # 5, 50, 100, 500, 1000, 2000 ", "_____no_output_____" ], [ "desired_num = 200", "_____no_output_____" ], [ "tr_i = 0\ntr_j = int(desired_num/2)\ntr_k = desired_num\n\ntr_i, tr_j, tr_k", "_____no_output_____" ] ], [ [ "# Generate dataset", "_____no_output_____" ] ], [ [ "np.random.seed(12)\ny = np.random.randint(0,10,5000)\nidx= []\nfor i in range(10):\n print(i,sum(y==i))\n idx.append(y==i)", "0 530\n1 463\n2 494\n3 517\n4 488\n5 497\n6 493\n7 507\n8 492\n9 519\n" ], [ "x = np.zeros((5000,2))", "_____no_output_____" ], [ "np.random.seed(12)\nx[idx[0],:] = np.random.multivariate_normal(mean = [5,5],cov=[[0.1,0],[0,0.1]],size=sum(idx[0]))\nx[idx[1],:] = np.random.multivariate_normal(mean = [-6,7],cov=[[0.1,0],[0,0.1]],size=sum(idx[1]))\nx[idx[2],:] = np.random.multivariate_normal(mean = [-5,-4],cov=[[0.1,0],[0,0.1]],size=sum(idx[2]))\nx[idx[3],:] = np.random.multivariate_normal(mean = [-1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[3]))\nx[idx[4],:] = np.random.multivariate_normal(mean = [0,2],cov=[[0.1,0],[0,0.1]],size=sum(idx[4]))\nx[idx[5],:] = np.random.multivariate_normal(mean = [1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[5]))\nx[idx[6],:] = np.random.multivariate_normal(mean = [0,-1],cov=[[0.1,0],[0,0.1]],size=sum(idx[6]))\nx[idx[7],:] = np.random.multivariate_normal(mean = [0,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[7]))\nx[idx[8],:] = np.random.multivariate_normal(mean = [-0.5,-0.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[8]))\nx[idx[9],:] = np.random.multivariate_normal(mean = [0.4,0.2],cov=[[0.1,0],[0,0.1]],size=sum(idx[9]))", "_____no_output_____" ], [ "x[idx[0]][0], x[idx[5]][5] ", "_____no_output_____" ], [ "for i in range(10):\n plt.scatter(x[idx[i],0],x[idx[i],1],label=\"class_\"+str(i))\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))", "_____no_output_____" ], [ "bg_idx = [ np.where(idx[3] == True)[0], \n np.where(idx[4] == True)[0], \n np.where(idx[5] == True)[0],\n np.where(idx[6] == True)[0], \n np.where(idx[7] == True)[0], \n np.where(idx[8] == True)[0],\n np.where(idx[9] == True)[0]]\n\nbg_idx = np.concatenate(bg_idx, axis = 0)\nbg_idx.shape", "_____no_output_____" ], [ "np.unique(bg_idx).shape", "_____no_output_____" ], [ "x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)\n", "_____no_output_____" ], [ "np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)", "_____no_output_____" ], [ "x = x/np.std(x[bg_idx], axis = 0, keepdims = True)", "_____no_output_____" ], [ "np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)", "_____no_output_____" ], [ "for i in range(10):\n plt.scatter(x[idx[i],0],x[idx[i],1],label=\"class_\"+str(i))\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))", "_____no_output_____" ], [ "foreground_classes = {'class_0','class_1', 'class_2'}\n\nbackground_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}", "_____no_output_____" ], [ "fg_class = np.random.randint(0,3)\nfg_idx = np.random.randint(0,m)\n\na = []\nfor i in range(m):\n if i == fg_idx:\n b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)\n a.append(x[b])\n 
print(\"foreground \"+str(fg_class)+\" present at \" + str(fg_idx))\n else:\n bg_class = np.random.randint(3,10)\n b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)\n a.append(x[b])\n print(\"background \"+str(bg_class)+\" present at \" + str(i))\na = np.concatenate(a,axis=0)\nprint(a.shape)\n\nprint(fg_class , fg_idx)", "background 3 present at 0\nbackground 8 present at 1\nbackground 5 present at 2\nbackground 3 present at 3\nbackground 5 present at 4\nbackground 7 present at 5\nbackground 6 present at 6\nbackground 5 present at 7\nbackground 3 present at 8\nbackground 5 present at 9\nbackground 4 present at 10\nbackground 7 present at 11\nbackground 4 present at 12\nbackground 3 present at 13\nbackground 9 present at 14\nbackground 6 present at 15\nbackground 6 present at 16\nbackground 3 present at 17\nforeground 2 present at 18\nbackground 8 present at 19\nbackground 5 present at 20\nbackground 6 present at 21\nbackground 4 present at 22\nbackground 9 present at 23\nbackground 8 present at 24\nbackground 3 present at 25\nbackground 7 present at 26\nbackground 6 present at 27\nbackground 8 present at 28\nbackground 3 present at 29\nbackground 7 present at 30\nbackground 8 present at 31\nbackground 7 present at 32\nbackground 7 present at 33\nbackground 5 present at 34\nbackground 9 present at 35\nbackground 6 present at 36\nbackground 4 present at 37\nbackground 7 present at 38\nbackground 6 present at 39\nbackground 3 present at 40\nbackground 4 present at 41\nbackground 3 present at 42\nbackground 4 present at 43\nbackground 7 present at 44\nbackground 6 present at 45\nbackground 5 present at 46\nbackground 8 present at 47\nbackground 8 present at 48\nbackground 7 present at 49\n(50, 2)\n2 18\n" ], [ "np.reshape(a,(2*m,1))", "_____no_output_____" ], [ "\nmosaic_list_of_images =[]\nmosaic_label = []\nfore_idx=[]\nfor j in range(desired_num):\n np.random.seed(j)\n fg_class = np.random.randint(0,3)\n fg_idx = np.random.randint(0,m)\n a = []\n for i in range(m):\n if i == fg_idx:\n b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)\n a.append(x[b])\n# print(\"foreground \"+str(fg_class)+\" present at \" + str(fg_idx))\n else:\n bg_class = np.random.randint(3,10)\n b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)\n a.append(x[b])\n# print(\"background \"+str(bg_class)+\" present at \" + str(i))\n a = np.concatenate(a,axis=0)\n mosaic_list_of_images.append(np.reshape(a,(2*m,1)))\n mosaic_label.append(fg_class)\n fore_idx.append(fg_idx)", "_____no_output_____" ], [ "mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T\nmosaic_list_of_images.shape", "_____no_output_____" ], [ "mosaic_list_of_images.shape, mosaic_list_of_images[0]", "_____no_output_____" ], [ "for j in range(m):\n print(mosaic_list_of_images[0][2*j:2*j+2])\n ", "[ 0.50503387 -0.65144237]\n[-0.02599553 -1.54374402]\n[ 0.47835858 -1.25389275]\n[0.50044544 1.71315143]\n[-0.72603666 -0.68240088]\n[0.34070696 0.0288995 ]\n[ 0.71631136 -0.14262169]\n[-1.42817136 -0.38290202]\n[ 1.24086849 -0.14745404]\n[0.70237352 0.29618561]\n[0.22554429 0.19892312]\n[-0.13509221 2.32146152]\n[ 0.47065137 -1.70530655]\n[-0.04424218 -0.85844737]\n[ 0.62089149 -0.08656834]\n[-1.00159206 -0.61812566]\n[-2.28311639 -0.09651796]\n[ 0.24291406 -1.06097518]\n[ 0.55556748 -1.08092273]\n[0.1067679 0.69094112]\n[-1.81042807 -0.42154319]\n[0.34516798 1.91607747]\n[-1.44649678 0.35308529]\n[ 0.36515974 -0.67213795]\n[ 1.22158179 -0.22952126]\n[-0.39848729 -0.15758709]\n[-1.71963958 -0.15407132]\n[ 
0.20179406 -0.01948999]\n[-0.17037075 -0.51969469]\n[-2.29570869 -0.14652342]\n[-1.21373644 0.2181639 ]\n[ 1.76844163 -0.1397818 ]\n[-1.06409105 0.04438987]\n[-0.4441898 2.69172256]\n[0.320152 1.70145171]\n[-0.70852583 -1.28478294]\n[-0.61407994 -1.32199884]\n[1.27399796 0.10715221]\n[-1.66469081 -0.5871587 ]\n[-0.43752875 -1.2548933 ]\n[-0.72733855 -0.49934895]\n[ 1.68006305 -0.41814497]\n[ 0.27427195 -1.04246037]\n[0.15548488 0.26248948]\n[-0.78406373 0.41505821]\n[0.02810277 0.25239245]\n[-0.35299762 -0.38493569]\n[7.07990475 5.57919731]\n[0.17135161 0.13756609]\n[1.5635545 0.13569962]\n" ], [ "def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):\n \"\"\"\n mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point\n labels : mosaic_dataset labels\n foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average\n dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is \"j\" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9\n \"\"\"\n avg_image_dataset = []\n cnt = 0\n counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0])\n for i in range(len(mosaic_dataset)):\n img = torch.zeros([2], dtype=torch.float64)\n np.random.seed(int(dataset_number*10000 + i))\n give_pref = foreground_index[i] #np.random.randint(0,9)\n # print(\"outside\", give_pref,foreground_index[i])\n for j in range(m):\n if j == give_pref:\n img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim\n else :\n img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)\n\n if give_pref == foreground_index[i] :\n # print(\"equal are\", give_pref,foreground_index[i])\n cnt += 1\n counter[give_pref] += 1\n else :\n counter[give_pref] += 1\n\n avg_image_dataset.append(img)\n\n print(\"number of correct averaging happened for dataset \"+str(dataset_number)+\" is \"+str(cnt)) \n print(\"the averaging are done as \", counter) \n return avg_image_dataset , labels , foreground_index\n \n ", "_____no_output_____" ], [ "avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)\n\n\ntest_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)", "number of correct averaging happened for dataset 1 is 100\nthe averaging are done as [2. 1. 1. 3. 2. 3. 1. 3. 1. 1. 2. 4. 2. 2. 0. 5. 1. 1. 1. 2. 1. 0. 2. 3.\n 3. 1. 3. 3. 4. 1. 0. 1. 4. 0. 2. 4. 2. 2. 2. 1. 2. 2. 3. 1. 2. 1. 5. 2.\n 2. 3.]\nnumber of correct averaging happened for dataset 50 is 100\nthe averaging are done as [1. 4. 1. 1. 1. 3. 1. 3. 2. 0. 2. 2. 5. 1. 4. 0. 2. 3. 2. 5. 2. 2. 3. 2.\n 2. 4. 3. 0. 4. 4. 1. 0. 2. 0. 4. 2. 1. 2. 3. 1. 1. 2. 0. 2. 1. 2. 0. 2.\n 3. 
2.]\n" ], [ "avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)\n# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)\n# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))\n# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))\nprint(\"==\"*40)\n\n\ntest_dataset = torch.stack(test_dataset, axis = 0)\n# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)\n# print(torch.mean(test_dataset, keepdims= True, axis = 0))\n# print(torch.std(test_dataset, keepdims= True, axis = 0))\nprint(\"==\"*40)\n", "================================================================================\n================================================================================\n" ], [ "x1 = (avg_image_dataset_1).numpy()\ny1 = np.array(labels_1)\n\nplt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')\nplt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')\nplt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')\nplt.legend()\nplt.title(\"dataset4 CIN with alpha = 1/\"+str(m))", "_____no_output_____" ], [ "x1 = (test_dataset).numpy() / m\ny1 = np.array(labels)\n\nplt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')\nplt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')\nplt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')\nplt.legend()\nplt.title(\"test dataset4\")", "_____no_output_____" ], [ "test_dataset[0:10]/m", "_____no_output_____" ], [ "test_dataset = test_dataset/m\ntest_dataset[0:10]", "_____no_output_____" ], [ "class MosaicDataset(Dataset):\n \"\"\"MosaicDataset dataset.\"\"\"\n\n def __init__(self, mosaic_list_of_images, mosaic_label):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.mosaic = mosaic_list_of_images\n self.label = mosaic_label\n #self.fore_idx = fore_idx\n \n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, idx):\n return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]\n\n", "_____no_output_____" ], [ "avg_image_dataset_1[0].shape\navg_image_dataset_1[0]", "_____no_output_____" ], [ "batch = 200\n\ntraindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )\ntrainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)\n", "_____no_output_____" ], [ "testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )\ntestloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)\n", "_____no_output_____" ], [ "testdata_11 = MosaicDataset(test_dataset, labels )\ntestloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)", "_____no_output_____" ], [ "class Whatnet(nn.Module):\n def __init__(self):\n super(Whatnet,self).__init__()\n self.linear1 = nn.Linear(2,3)\n # self.linear2 = nn.Linear(50,10)\n # self.linear3 = nn.Linear(10,3)\n\n torch.nn.init.xavier_normal_(self.linear1.weight)\n torch.nn.init.zeros_(self.linear1.bias)\n\n def forward(self,x):\n # x = F.relu(self.linear1(x))\n # x = F.relu(self.linear2(x))\n x = (self.linear1(x))\n\n return x", "_____no_output_____" ], [ "def calculate_loss(dataloader,model,criter):\n model.eval()\n r_loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataloader, 0):\n inputs, labels = data\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n outputs = model(inputs)\n loss = criter(outputs, labels)\n r_loss += loss.item()\n return 
r_loss/(i+1)", "_____no_output_____" ], [ "def test_all(number, testloader,net):\n correct = 0\n total = 0\n out = []\n pred = []\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n images, labels = images.to(\"cuda\"),labels.to(\"cuda\")\n out.append(labels.cpu().numpy())\n outputs= net(images)\n _, predicted = torch.max(outputs.data, 1)\n pred.append(predicted.cpu().numpy())\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n pred = np.concatenate(pred, axis = 0)\n out = np.concatenate(out, axis = 0)\n print(\"unique out: \", np.unique(out), \"unique pred: \", np.unique(pred) )\n print(\"correct: \", correct, \"total \", total)\n print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))", "_____no_output_____" ], [ "def train_all(trainloader, ds_number, testloader_list):\n \n print(\"--\"*40)\n print(\"training on data set \", ds_number)\n \n torch.manual_seed(12)\n net = Whatnet().double()\n net = net.to(\"cuda\")\n \n criterion_net = nn.CrossEntropyLoss()\n optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)\n \n acti = []\n loss_curi = []\n epochs = 1000\n running_loss = calculate_loss(trainloader,net,criterion_net)\n loss_curi.append(running_loss)\n print('epoch: [%d ] loss: %.3f' %(0,running_loss)) \n for epoch in range(epochs): # loop over the dataset multiple times\n ep_lossi = []\n\n running_loss = 0.0\n net.train()\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n # zero the parameter gradients\n optimizer_net.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion_net(outputs, labels)\n # print statistics\n running_loss += loss.item()\n loss.backward()\n optimizer_net.step()\n\n running_loss = calculate_loss(trainloader,net,criterion_net)\n if(epoch%200 == 0):\n print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) \n loss_curi.append(running_loss) #loss per epoch\n if running_loss<=0.05:\n print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))\n break\n\n print('Finished Training')\n \n correct = 0\n total = 0\n with torch.no_grad():\n for data in trainloader:\n images, labels = data\n images, labels = images.to(\"cuda\"), labels.to(\"cuda\")\n outputs = net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))\n \n for i, j in enumerate(testloader_list):\n test_all(i+1, j,net)\n \n print(\"--\"*40)\n \n return loss_curi\n ", "_____no_output_____" ], [ "train_loss_all=[]\n\ntestloader_list= [ testloader_1, testloader_11]", "_____no_output_____" ], [ "train_loss_all.append(train_all(trainloader_1, 1, testloader_list))", "--------------------------------------------------------------------------------\ntraining on data set 1\nepoch: [0 ] loss: 1.059\nepoch: [1] loss: 1.059\nepoch: [201] loss: 1.008\nepoch: [401] loss: 0.972\nepoch: [601] loss: 0.940\nepoch: [801] loss: 0.909\nFinished Training\nAccuracy of the network on the 100 train images: 68.00 %\nunique out: [0 1 2] unique pred: [0 1 2]\ncorrect: 68 total 100\nAccuracy of the network on the 100 test dataset 1: 68.00 %\nunique out: [0 1 2] unique pred: [0 1 2]\ncorrect: 100 total 100\nAccuracy of the network on the 100 test dataset 2: 100.00 
%\n--------------------------------------------------------------------------------\n" ], [ "%matplotlib inline", "_____no_output_____" ], [ "for i,j in enumerate(train_loss_all):\n plt.plot(j,label =\"dataset \"+str(i+1))\n \n\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Training_loss\")\n\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0576f7f12dae50b9911a5667feca44dc4ebfb63
3,742
ipynb
Jupyter Notebook
jw/summary_diary_0.ipynb
JoonSeongLee/dss7-coupon
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
[ "MIT" ]
null
null
null
jw/summary_diary_0.ipynb
JoonSeongLee/dss7-coupon
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
[ "MIT" ]
null
null
null
jw/summary_diary_0.ipynb
JoonSeongLee/dss7-coupon
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
[ "MIT" ]
null
null
null
21.505747
123
0.49225
[ [ [ "# For detailed graphs, see 02_coupon_list_ref", "_____no_output_____" ], [ "## Category and price related", "_____no_output_____" ], [ "#### 1. CAPSULE_TEXT vs GENRE_NAME\n#### = For the coupon type, CAPSULE classifies it in detail while GENRE classifies it roughly\n#### 2. Delivery and Food coupons account for about half of all coupons\n#### 3. The most common discount rate (PRICE_RATIO) is 50%, the most common list price (CATALOG_PRICE) is 5000 yen, and the most common discounted price (DISCOUNT_PRICE) is 2000 yen\n#### 4. Only 23 coupons have no discount at all", "_____no_output_____" ], [ "-----------", "_____no_output_____" ], [ "## DISP, VALID related", "_____no_output_____" ], [ "#### 5. More coupons went on sale in 2011 than in 2012\n#### 6. Coupons on sale for 2~4 days (DISPPERIOD) account for 80% of the total\n#### 7. Most coupons' validity periods (VALIDPERIOD) end in 2012\n#### 8. Validity periods (VALIDPERIOD) range from 1 to 180 days, and coupons valid for 89, 178, or 179 days account for 20% of the total\n#### 9. Across all coupons, Delivery and Food make up 50%, but among these coupons Food alone is 66%\n#### 10. Coupons other than those valid for 78, 178, or 179 days account for 80% of the total\n#### 11. These reflect the overall trend relatively well", "_____no_output_____" ], [ "--------------------------", "_____no_output_____" ], [ "## USABLE_DATE related", "_____no_output_____" ], [ "#### 12. I expected many coupons to be usable (USABLE) on holidays and weekends, but there are actually more on weekdays\n#### 13. The share of coupons USABLE on each day of the week rises from Monday through Thursday, peaks on Thursday, and declines afterwards\n#### 14. Using a coupon requires staff to be working, and staff probably do not work on holidays or weekends, which would explain this", "_____no_output_____" ], [ "-----------------------", "_____no_output_____" ], [ "## area related", "_____no_output_____" ], [ "#### 15. A coupon's shop area is recorded under the following 3 criteria:\n#### large_area (large), ken_name (medium, prefecture), small_area (small), it seems\n#### 16. To get the share of coupons in each region, we decided to consider only large_area.\n#### 17. large_area is divided into 9 regions in total.\n##### 'Kanto', 'Kansai', 'East Sea', 'Hokkaido', 'Kyushu-Okinawa', 'Northeast', 'Shikoku', 'China', 'Hokushinetsu'\n#### 18. Of these, Kanto and Kansai account for about 70% of all coupons\n#### 19. Prefectures belonging to Kanto\n#### = Saitama, Chiba, Gunma, Tochigi, Ibaraki, Tokyo, Kanagawa\n\n#### 20. Prefectures belonging to Kansai\n#### = Mie, Nara, Wakayama, Kyoto, Osaka, Hyōgo, Shiga, Tokushima, Tottori, Fukui", "_____no_output_____" ], [ "-------------------------", "_____no_output_____" ], [ "# Kanto", "_____no_output_____" ], [ "<img src=\"img/kanto.PNG\">", "_____no_output_____" ], [ "# Kansai", "_____no_output_____" ], [ "<img src=\"img/kansai.png\">", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d057712bee390e87c2c5037456ca85d7e4e55f24
63,973
ipynb
Jupyter Notebook
day2_student_notebook.ipynb
dukeplusds/mlwscv2002
d72de8ab8d8babe42cf30a4e456fcace82165af2
[ "MIT" ]
9
2022-01-02T14:06:28.000Z
2022-01-05T21:22:32.000Z
day2_student_notebook.ipynb
dukeplusds/mlwscv2002
d72de8ab8d8babe42cf30a4e456fcace82165af2
[ "MIT" ]
null
null
null
day2_student_notebook.ipynb
dukeplusds/mlwscv2002
d72de8ab8d8babe42cf30a4e456fcace82165af2
[ "MIT" ]
15
2021-12-31T15:53:16.000Z
2022-01-14T00:30:52.000Z
37.853846
381
0.612243
[ [ [ "# Introduction to Convolutional Neural Networks (CNNs) in PyTorch", "_____no_output_____" ], [ "### Representing images digitally\n\nWhile convolutional neural networks (CNNs) see a wide variety of uses, they were originally designed for images, and CNNs are still most commonly used for vision-related tasks.\nFor today, we'll primarily be focusing on CNNs for images.\nBefore we dive into convolutions and neural networks, it's worth prefacing with how images are represented by a computer, as this understanding will inform some of our design choices.\n\nPreviously, we saw an example of a digitized MNIST handwritten digit.\nSpecifically, we represent it as an $H \\times W$ table, with the value of each element storing the intensity of the corresponding pixel.\n\n<img src=\"./Figures/mnist_digital.png\" alt=\"mnist_digital\" style=\"width: 600px;\"/>\n\nWith a 2D representation as above, we for the most part can only efficiently represent grayscale images.\nWhat if we want color?\nThere are many schemes for storing color, but one of the most common ones is the [RGB color model](https://en.wikipedia.org/wiki/RGB_color_model).\nIn such a system, we store 3 tables of pixel intensities (each called a *channel*), one each for the colors red, green, and blue (hence RGB), resulting in an $H \\times W \\times 3$ tensor.\nPixel values for a particular channel indicate how much of the corresponding color the image has at a particular location.", "_____no_output_____" ], [ "## Let's load an image and look at different channels:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport imageio\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Read the image \"./Figures/chapel.jpg\" from the disk.\n# Hint: use `im = imageio.imread(<Path to the image>)`.\n\n# Print the shape of the tensor\n\n# Display the image", "_____no_output_____" ] ], [ [ "We can see that the image we loaded has height and width of $620 \\times 1175$, with 3 channels corresponding to RGB.\n\nWe can easily slice out and view individual color channels:", "_____no_output_____" ] ], [ [ "# Uncomment the following command to extract the red channel of the above image.\n# im_red = im[:,:,0]\n\n# Display the image\n# Hint: To display the pixel values for a single channel, we can display the image using the gray-scale colormap", "_____no_output_____" ], [ "# Repeat the above for the blue channel to visualize features represented in the blue color channel.", "_____no_output_____" ] ], [ [ "While we have so far considered only 3 channel RGB images, there are many settings in which we may consider a different number of channels.\nFor example, [hyperspectral imaging](https://en.wikipedia.org/wiki/Hyperspectral_imaging) uses a wide range of the electromagnetic spectrum to characterize a scene.\nSuch modalities may have hundreds of channels or more.\nAdditionally, we'll soon see that certain intermediate representations in a CNN can be considered images with many channels.", "_____no_output_____" ], [ "### Convolutions\nConvolutional neural networks (CNNs) are a class of neural networks that have convolutional layers.\nCNNs are particularly effective for data that have spatial structures and correlations (e.g. images).\nWe'll focus on CNNs applied to images in this tutorial.\nRecall that a multilayer perceptron (MLP) is entirely composed of fully connected layers, which are each a matrix multiply operation (and addition of a bias) followed by a non-linearity (e.g. sigmoid, ReLU). 
\nA convolutional layer is similar, except the matrix multiply operation is replaced with a convolution operation (in practice a cross-correlation). \nNote that a CNN need not be entirely composed of convolutional layers; in fact, many popular CNN architectures end in fully connected layers.\n\nAs before, since we're building neural networks, let's start by loading PyTorch. We'll find NumPy useful as well, so we'll also import that here.", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# PyTorch Imports\n##################################################\n# #\n# ---- YOUR CODE HERE ---- #\n# #\n##################################################", "_____no_output_____" ] ], [ [ "#### Review: Fully connected layer\nIn a fully connected layer, the input $x \\in \\mathbb R^{M \\times C_{in}}$ is a vector (or, rather a batch of vectors), where $M$ is the minibatch size and $C_{in}$ is the dimensionality of the input. \nWe first matrix multiply the input $x$ by a weight matrix $W$.\nThis weight matrix has dimensions $W \\in \\mathbb R^{C_{in} \\times C_{out}}$, where $C_{out}$ is the number of output units.\nWe then add a bias for each output, which we do by adding $b \\in \\mathbb{R}^{C_{out}}$.\nThe output $y \\in \\mathbb{R}^{M \\times C_{out}}$ of the fully connected layer then:\n\n\\begin{align*}\ny = \\text{ReLU}(x W + b)\n\\end{align*}\n\nRemember, the values of $W$ and $b$ are variables that we are trying to learn for our model. \nBelow we have a visualization of what the matrix operation looks like (bias term and activation function omitted).\n\n<img src=\"./Figures/mnist_matmul.png\" width=\"800\"/>", "_____no_output_____" ] ], [ [ "# Create a random flat input vector\nx_fc = torch.randn(100, 1024)\n\n# Create weight matrix variable\nW = torch.randn(1024, 10)/np.sqrt(1024)\n\n# Create bias variable\nb = torch.zeros(10, requires_grad=True)\n\n# Use `W` and `b` to apply a fully connected layer. \n# Store the output in variable `y`.\n# Don't forget to apply the activation function.\n##################################################\n# ---- YOUR CODE HERE ---- #\n##################################################\n\n# Print input/output shape\nprint(\"Input shape: {}\".format(x_fc.shape))\nprint(\"Output shape: {}\".format(y.shape))", "_____no_output_____" ] ], [ [ "#### Convolutional layer\n\nIn a convolutional layer, we convolve the input $x$ with a convolutional kernel (aka filter), which we also call $W$, producing output $y$:\n\n\\begin{align*}\ny = \\text{ReLU}(W*x + b)\n\\end{align*}\n\nIn the context of CNNs, the output $y$ is often referred to as feature maps. As with a fully connected layer, the goal is to learn $W$ and $b$ for our model.\n\nUnlike the input of a fully connected layer, which is $x \\in \\mathbb R^{M\\times C_{in}}$, the dimensionality of an image input is 4D: $x \\in \\mathbb R^{M \\times C_{in} \\times H_{in} \\times W_{in}}$, where $M$ is still the batch size, $C_{in}$ is the number of channels of the input (e.g. 
3 for RGB), and $H_{in}$ and $W_{in}$ are the height and width of the image.\n\nThe weight parameter $W$ is also different in a convolutional layer.\nUnlike the 2-D weight matrix for fully connected layers, the kernel is 4-D with dimensions $W \in \mathbb R^{C_{out} \times C_{in} \times H_K \times W_K }$, where $H_K$ and $W_K$ are the kernel height and width, respectively.\nA common choice for $H_K$ and $W_K$ is $H_K = W_K = 3$ or $5$, but this tends to vary depending on the architecture.\nConvolving the input with the kernel and adding a bias then gives an output $y \in \mathbb R^{M \times C_{out} \times H_{out} \times W_{out}}$.\nIf we use \"same\" padding and a stride of $1$ in our convolution (more on this later), our output will have the same spatial dimensions as the input: $H_{out}=H_{in}$ and $W_{out}=W_{in}$.\n\nIf you're having trouble visualizing this operation in 4D, it's easier to think about for a single member of the minibatch, one convolutional kernel at a time. \nConsider a stack of $C_{out}$ kernels, each of which is 3D ($C_{in} \times H_K \times W_K $). \nThis 3D volume is then slid across the input (which is also 3D: $C_{in} \times H_{in} \times W_{in}$) in the two spatial dimensions (along $H_{in}$ and $W_{in}$). \nThe outputs of the multiplication of the kernel and the input at every location create a single feature map that is $H_{out} \times W_{out}$. \nStacking the feature maps generated by each kernel gives the 3D output $C_{out} \times H_{out} \times W_{out} $.\nRepeat the process for all $M$ inputs in the minibatch, and we get a 4D output $M \times C_{out} \times H_{out} \times W_{out}$.\n\n<img src=\"./Figures/conv_filters.png\" alt=\"Convolutional filters\" style=\"width: 600px;\"/>\n\nA few more things to note:\n- Notice the ordering of the dimensions of the input (batch, channels in, height, width).\nThis is commonly referred to as $NCHW$ ordering.\nMany other languages and libraries (e.g. MATLAB, TensorFlow, the image example at the beginning of this notebook) instead default to the slightly different $NHWC$ ordering.\nPyTorch defaults to $NCHW$, as it is more efficient computationally, especially with CUDA. \n- An additional argument for the convolution is the *stride*, which controls how far we slide the convolutional filter as we move it along the input image. \nThe convolutional operator, from its signal processing roots, by default considers a stride length of 1 in all dimensions, but in some situations we would like to consider strides of more than 1 (or even less than 1). \nMore on this later.\n- In the context of signal processing, convolutions usually result in outputs that are larger than the input size, which happens when the kernel \"hangs off the edge\" of the input on both sides. \nThis might not always be desirable.\nWe can control this by controlling the padding of the input.\nTypically, we pad the input to ensure the output has the same spatial dimensions as the input (assuming stride of 1); this makes it easier for us to keep track of what the size of our model is.\n\nLet's implement this convolution operator in code.\nThere is a convolution implementation in `torch.nn.functional`, which we use here.", "_____no_output_____" ] ], [ [ "# Create a random 4D tensor. 
Use the NCHW format, where N = 100, C = 3, H = W =32\nx_cnn = \n\n# Create convolutional kernel variable (C_out, C_in, H_k, W_k)\nW1 = \n\n# Create a bias variable of size C_out\nb1 = \n\n# Apply the convolutional layer with relu activation\nconv1 = \n\n# Print input/output shape\nprint(\"Input shape: {}\".format(x_cnn.shape))\nprint(\"Convolution output shape: {}\".format(conv1.shape))", "_____no_output_____" ] ], [ [ "Just like in a MLP, we can stack multiple of these convolutional layers. \nIn the *Representing Images Digitally* section, we briefly mentioned considering images with channels more than 3.\nObserve that the input to the second layer (i.e. the output of the first layer) can be viewed as an \"image\" with $C_{out}$ channels.\nInstead of each channel representing a color content though, each channel effectively represents how much the original input image activated a particular convolutional kernel.\nGiven $C_{out}$ kernels that are each $C_{in} \\times H_K \\times W_K$, this results in $C_{out}$ channels for the output of the convolution.\n\nNote that we need to change the dimensions of the convolutional kernel such that its input channels matches the number of output channels of the previous layer:", "_____no_output_____" ] ], [ [ "# Create the second convolutional layer by defining a random `W2` and `b2`\nW2 = \nb2 = \n\n# Apply 2nd convolutional layer to the output of the first convolutional layer\nconv2 = \n\n# Print output shape\nprint(\"Second convolution output shape: {}\".format(conv2.shape))", "_____no_output_____" ] ], [ [ "In fact, we typically perform these convolution operations many times. \nPopular CNN architectures for image analysis today can be 100+ layers.", "_____no_output_____" ], [ "### Reshaping\n\nYou'll commonly finding yourself needing to reshape tensors while building CNNs.\nThe PyTorch function for doing so is `view()`. \nAnyone familiar with NumPy will find it very similar to `np.reshape()`.\nImportantly, the new dimensions must be chosen so that it is possible to rearrange the input into the shape of the output (i.e. 
the total number of elements must be the same).\nAs with NumPy, you can optionally replace one of the dimensions with a `-1`, which tells `torch` to infer the missing dimension.", "_____no_output_____" ] ], [ [ "M = torch.zeros(4, 3)\n\nM2 = M.view(1,1,12)\nM3 = M.view(2,1,2,3)\nM4 = M.view(-1,2,3)\nM5 = M.view(-1)", "_____no_output_____" ] ], [ [ "To get an idea of why reshaping is need in a CNN, let's look at a diagram of a simple CNN.\n\n<img src=\"Figures/mnist_cnn_ex.png\" alt=\"mnist_cnn_ex\" style=\"width: 800px;\"/>\n\nFirst of all, the CNN expects a 4D input, with the dimensions corresponding to `[batch, channel, height, width]`.\nYour data may not come in this format, so you may have to reshape it yourself.", "_____no_output_____" ] ], [ [ "x_flat = torch.randn(100, 1024)\n\n# Reshape flat input image into a 4D batched image input\n# Hint: Use batch=100, height=width=32.\nx_reshaped = \n\n# Print input shape\nprint(x_reshaped.shape)", "_____no_output_____" ] ], [ [ "CNN architectures also commonly contain fully connected layers or a softmax, as we're often interested in classification.\nBoth of these expect 2D inputs with dimensions `[batch, dim]`, so you have to \"flatten\" a CNN's 4D output to 2D.\nFor example, to flatten the convolutional feature maps we created earlier:", "_____no_output_____" ] ], [ [ "# Flatten convolutional feature maps into a vector\nh_flat = conv2.view(-1, 32*32*32)\n\n# Print output shape\nprint(h_flat.shape)", "_____no_output_____" ] ], [ [ "### Pooling and striding\n\nAlmost all CNN architectures incorporate either pooling or striding. This is done for a number of reasons, including:\n- Dimensionality reduction: pooling and striding operations reduces computational complexity by shrinking the number of values passed to the next layer.\nFor example, a 2x2 maxpool reduces the size of the feature maps by a factor of 4.\n- Translational invariance: Oftentimes in computer vision, we'd prefer that shifting the input by a few pixels doesn't change the output. Pooling and striding reduces sensitivity to exact pixel locations.\n- Increasing receptive field: by summarizing a window with a single value, subsequent convolutional kernels are seeing a wider swath of the original input image. For example, a max pool on some input followed by a 3x3 convolution results in a kernel \"seeing\" a 6x6 region instead of 3x3.\n\n#### Pooling\nThe two most common forms of pooling are max pooling and average pooling. 
\nBoth reduce values within a window to a single value, on a per-feature-map basis.\nMax pooling takes the maximum value of the window as the output value; average pooling takes the mean.\n\n<img src=\"./Figures/maxpool.png\" alt=\"avg_vs_max\" style=\"width: 800px;\"/>", "_____no_output_____" ] ], [ [ "# Recreate the values in pooling figure with shape [4,4]\nfeature_map_fig = \n\n# Convert 2D matrix to a 4D tensor of shape [1,1,4,4].\nfmap_fig = \n\nprint(\"Feature map shape pre-pooling: {}\".format(fmap_fig.shape))\n\n# Apply max pool to fmap_fig\nmax_pool_fig = \n\nprint(\"\\nMax pool\")\nprint(\"Shape: {}\".format(max_pool_fig.shape))\nprint(torch.squeeze(max_pool_fig))\n\n# Apply Avgerage pool to fmap_fig\navg_pool_fig =\n\nprint(\"\\nAvg pool\")\nprint(\"Shape: {}\".format(avg_pool_fig.shape))\nprint(torch.squeeze(avg_pool_fig))", "_____no_output_____" ] ], [ [ "Now we will apply max pool and average pool to the output of the convolutional layer `conv2`.", "_____no_output_____" ] ], [ [ "# Taking the output we've been working with so far, first print its current size\nprint(\"Shape of conv2 feature maps before pooling: {0}\".format(conv2.shape))\n\n# Apply Max pool with size = 2 and then print new shape.\nmax_pool2 = \nprint(\"Shape of conv2 feature maps after max pooling: {0}\".format(max_pool2.shape))\n\n# Average pool with size = 2 and then print new shape\navg_pool2 = \nprint(\"Shape of conv2 feature maps after avg pooling: {0}\".format(avg_pool2.shape))", "_____no_output_____" ] ], [ [ "#### Striding\nOne might expect that pixels in an image have high correlation with neighboring pixels, so we can save computation by skipping positions while sliding the convolutional kernel. \nBy default, a CNN slides across the input one pixel at a time, which we call a stride of 1.\nBy instead striding by 2, we skip calculating 75% of the values of the output feature map, which yields a feature map that's half the size in each spatial direction.\nNote, while pooling is an operation done after the convolution, striding is part of the convolution operation itself.", "_____no_output_____" ] ], [ [ "# Since striding is part of the convolution operation, we'll start with the feature maps before the 2nd convolution\nprint(\"Shape of conv1 feature maps: {0}\".format(conv1.shape))\n\n# Apply 2nd convolutional layer, with striding of 2\nconv2_strided = \n\n# Print output shape\nprint(\"Shape of conv2 feature maps with stride of 2: {0}\".format(conv2_strided.shape))", "_____no_output_____" ] ], [ [ "## Building a custom CNN", "_____no_output_____" ], [ "Let's revisit MNIST digit classification, but this time, we'll use the following CNN as our classifier: $5 \\times 5$ convolution -> $2 \\times 2$ max pool -> $5 \\times 5$ convolution -> $2 \\times 2$ max pool -> fully connected to $\\mathbb R^{256}$ -> fully connected to $\\mathbb R^{10}$ (prediction). 
\nReLU activation functions will be used to impose non-linearities.\nRemember, convolutions produce 4-D outputs, and fully connected layers expect 2-D inputs, so tensors must be reshaped when transitioning from one to the other.\n\nWe can build this CNN with the components introduced before, but as with the logistic regression example, it may prove helpful to instead organize our model with a `nn.Module`.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\n# Important: Inherit the `nn.Module` class to define a PyTorch model\nclass CIFAR_CNN():\n def __init__(self):\n super().__init__()\n \n # Step 1: Define the first convoluation layer (C_in=3, C_out=32, H_k=W_k=5, padding = 2)\n self.conv1 = \n \n # Step 2: Define the second convolutional layer (C_out=64, H_k=W_k=5, padding = 2)\n self.conv2 = \n \n # Step 3: Define the first fully-connected layer with an output dimension of 256.\n # What should be the input dimension of this layer? \n self.fc1 = \n \n # Step 4: Define the second fully-connected layer with an output dimension of 10 (# of classes).\n self.fc2 = \n\n def forward(self, x):\n # Step 5: Using the layers defined in __init__ function, define the forward pass of the neural network below:\n \n # Apply conv layer 1, activation, and max-pool\n \n # Apply conv layer 2, activation, and max-pool\n \n # Reshape to kernel for fully-connected layer\n \n # Apply fc layer 1 and activation\n \n # Apply fc layer 2\n output = \n \n return output ", "_____no_output_____" ] ], [ [ "Notice how our `nn.Module` contains several operation chained together.\nThe code for submodule initialization, which creates all the stateful parameters associated with each operation, is placed in the `__init__()` function, where it is run once during object instantiation.\nMeanwhile, the code describing the forward pass, which is used every time the model is run, is placed in the `forward()` method.\nPrinting an instantiated model shows the model summary:", "_____no_output_____" ] ], [ [ "model = CIFAR_CNN()\nprint(model)", "_____no_output_____" ] ], [ [ "We can drop this model into our logistic regression training code, with few modifications beyond changing the model itself.\nA few other changes:\n- CNNs expect a 4-D input, so we no longer have to reshape the images before feeding them to our neural network.\n- Since CNNs are a little more complex than models we've worked with before, we're going to increase the number of epochs (complete passes through the training data) during training.\n- We switch from a vanilla stochastic gradient descent optimizer to the [Adam](https://arxiv.org/abs/1412.6980) optimizer, which tends to do well for neural networks.", "_____no_output_____" ], [ "## Training the CNN", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom tqdm.notebook import tqdm, trange\n\ncifar_train = datasets.CIFAR10(root=\"./datasets/cifar-10/\", train=True, transform=transforms.ToTensor(), download=True)\ncifar_test = datasets.CIFAR10(root=\"./datasets/cifar-10/\", train=False, transform=transforms.ToTensor(), download=True)\n\n# Creatre the train and test data loaders.\ntrain_loader = \ntest_loader = ", "_____no_output_____" ], [ "# Create a loader identical to the training laoder with a sample size of 8. This is to demonstrate \n# how we display images. 
If we had used the train_loader, we would be looking at 100 images!\nsample_loader = \n\n#define an image viewing function\ndef imshow(img):\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n#list out the classes for the dataset in order from 0 to 9 to correspond to the integer labels\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n#Take a sample of 1 batch from the sample loader\ndataiter = iter(sample_loader)\nimages, labels = dataiter.next()\n\n# show images\nimshow(torchvision.utils.make_grid(images))\n# print labels\nprint(' '.join('%5s' % classes[labels[j]] for j in range(8)))", "_____no_output_____" ], [ "# Instantiate model \nmodel = \n\n# Loss and Optimizer\ncriterion = \noptimizer = \ntrack_loss = []\n\n# Iterate through train set minibatchs\nnum_training_steps = 0\nfor epoch in trange(3):\n for images, labels in tqdm(train_loader):\n \n # Step 1: Zero out the gradients.\n \n # Step 2: Forward pass.\n \n # Step 3: Compute the loss using `criterion`.\n \n # Step 5: Backward pass.\n \n # Step 6: Update the parameters.\n \n # Step 7: Track the loss value at every 100th step.\n if num_training_steps % 100 == 0:\n # Append loss to the list.\n track_loss.append()\n \n num_training_steps += 1", "_____no_output_____" ] ], [ [ "### Let's plot the loss function", "_____no_output_____" ] ], [ [ "##################################################\n# #\n# ---- YOUR CODE HERE ---- #\n# #\n##################################################", "_____no_output_____" ] ], [ [ "## Testing the trained model", "_____no_output_____" ] ], [ [ "## Testing\ncorrect = 0\ntotal = len(cifar_test)\n\nwith torch.no_grad():\n # Iterate through test set minibatchs \n for images, labels in tqdm(test_loader):\n \n # Step 1: Forward pass to get \n y = \n \n # Step 2: Compute the predicted labels from `y`.\n predictions = \n \n # Step 3: Compute the number of samples that were correctly predicted, and maintain the count in the variable `correct`.\n correct +=\n\nprint('Test accuracy: {}'.format(correct/total))", "_____no_output_____" ] ], [ [ "If you are running this notebook on CPU, training this CNN might take a while.\nOn the other hand, if you use a GPU, this model should train in seconds.\nThis is why we usually prefer to use GPUs when we have them.", "_____no_output_____" ], [ "### Torchvision", "_____no_output_____" ], [ "#### Datasets and transforms\n\nAs any experienced ML practioner will say, data wrangling is often half (sometimes even 90%) of the battle when building a model.\nOften, we have to write significant code to handle downloading, organizing, formatting, shuffling, pre-processing, augmenting, and batching examples. \nFor popular datasets, we'd like to standardize data handling so that the comparisons we make are specific to the models themselves.\n\nEnter [Torchvision](https://pytorch.org/vision/stable/index.html).\nTorchvision includes easy-to-use APIs for downloading and loading many popular vision datasets.\nWe've previously seen this in action for downloading the MNIST dataset:", "_____no_output_____" ] ], [ [ "from torchvision import datasets\n\nmnist_train = datasets.CIFAR10(root=\"./datasets\", train=True, transform=transforms.ToTensor(), download=True)", "_____no_output_____" ] ], [ [ "Of course, there's [many more](https://pytorch.org/vision/stable/datasets.html).\nCurrently, datasets for image classification (e.g. 
MNIST, CIFAR, ImageNet), object detection (VOC, COCO, Cityscapes), and video action recognition (UCF101, Kinetics) are included.\n\nFor formatting, pre-processing, and augmenting, [transforms](https://pytorch.org/vision/stable/transforms.html) can come in handy.\nAgain, we've seen this before (see above), when we used a transform to convert the MNIST data from PIL images to PyTorch tensors.\nHowever, transforms can be used for much more. \nPreprocessing steps like data whitening are common before feeding the data into the model.\nAlso, in many cases, we use data augmentations to artificially inflate our dataset and learn invariances.\nTransforms are a versatile tool for all of these.", "_____no_output_____" ], [ "#### Leveraging popular convolutional neural networks\n\nWhile you certainly can build your own custom CNNs like we did above, more often than not, it's better to use one of the popular existing architectures. \nThe Torchvision documentation has a [list of supported CNNs](https://pytorch.org/vision/stable/models.html), as well as some performance characteristics. \nThere's a number of reasons for using one of these CNNs instead of designing your own.\n\nFirst, for image datasets larger and more complex than CIFAR and MNIST (which is basically all of them), a fair amount network depth and width is often necessary.\nFor example, some of the popular CNNs can be over 100 layers deep, with several tricks and details beyond what we've covered in this notebook.\nCoding all of this yourself has a high potential for error, especially when you're first getting started.\nInstead, you can create the CNN architecture using Torchvision, using a couple lines:", "_____no_output_____" ] ], [ [ "import torchvision.models as models\nresnet18 = models.resnet18()\nprint(resnet18)", "_____no_output_____" ] ], [ [ "Loading a working CNN architecture in a couple lines can save a significant amount of time both implementing and debugging.\n\nThe second, perhaps even more important, reason to use one of these existing architectures is the ability to use pre-trained weights.\nEarly on in the recent resurgence of deep learning, people discovered that the weights of a CNN trained for ImageNet classification were highly transferable. 
\nFor example, it is common to use the weights of an ImageNet-trained CNN as a weight initialization for other vision tasks, or even to freeze the bulk of the weights and only re-train the final classification layer(s) on a new task.\nThis is significant, as in most settings, we rarely have enough labeled data to train a powerful CNN from scratch without overfitting.\nLoading pre-trained CNN is also pretty simple, involving an additional argument to the previous cell block:\n\n`resnet18 = models.resnet18(pretrained=True)`\n\n<font size=\"1\">*We will not be using the above command, as running it will initiate a download of the pre-trained weights, which is a fairly large file.*</font>\n\nA full tutorial on using pre-trained CNNs is a little beyond the scope of this notebook.\nSee [this tutorial](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html) for an example.", "_____no_output_____" ], [ "#### Other computer vision tasks\nThe base CNN architectures were often designed for image classification, but the same CNNs are often used as the backbone of most modern computer vision models.\nThese other models often take this base CNN and include additional networks or make other architecture changes to adapt them to other tasks, such as object detection.\nTorchvision contains a few models (and pre-trained weights) for object detection, segmentation, and video action recognition.\nFor example, to load a [Faster R-CNN](https://arxiv.org/abs/1506.01497) with a [ResNet50](https://arxiv.org/abs/1512.03385) convolutional feature extractor with [Feature Pyramid Networks](https://arxiv.org/abs/1612.03144) pre-trained on [MS COCO](http://cocodataset.org/#home):\n\n`object_detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)`\n\n<font size=\"1\">*Again, this line has been commented out to prevent loading a large network for this demo.*</font>\n\nTorchvision's selection of non-classification models is relatively light, and not particularly flexible.\nA number of other libraries are available, depending on the task.\nFor example, for object detection and segmentation, Facebook AI Research's [Detectron2](https://github.com/facebookresearch/detectron2) is highly recommend.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0577ff415bc66d1414fc1087732e2d18d2a5039
23,990
ipynb
Jupyter Notebook
mem_mem/t2-cke.ipynb
3upperm2n/trans_kernel_model
72a9156fa35b5b5407173f6dbde685feb0a6a3f5
[ "MIT" ]
null
null
null
mem_mem/t2-cke.ipynb
3upperm2n/trans_kernel_model
72a9156fa35b5b5407173f6dbde685feb0a6a3f5
[ "MIT" ]
null
null
null
mem_mem/t2-cke.ipynb
3upperm2n/trans_kernel_model
72a9156fa35b5b5407173f6dbde685feb0a6a3f5
[ "MIT" ]
null
null
null
27.138009
98
0.388245
[ [ [ "import warnings\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys # error msg\nimport operator # sorting\nfrom math import *\n\nfrom read_trace import *\nfrom avgblkmodel import *\n\nwarnings.filterwarnings(\"ignore\", category=np.VisibleDeprecationWarning)", "_____no_output_____" ] ], [ [ "# gpu info", "_____no_output_____" ] ], [ [ "gtx950 = DeviceInfo()\ngtx950.sm_num = 6\ngtx950.sharedmem_per_sm = 49152\ngtx950.reg_per_sm = 65536\ngtx950.maxthreads_per_sm = 2048", "_____no_output_____" ] ], [ [ "# single stream info", "_____no_output_____" ] ], [ [ "data_size = 23000\ntrace_file = './1cke/trace_' + str(data_size) + '.csv'\ndf_trace = trace2dataframe(trace_file) # read the trace to the dataframe", "_____no_output_____" ], [ "df_trace", "_____no_output_____" ], [ "df_single_stream = model_param_from_trace_v1(df_trace)\ndf_single_stream.head(20)", "_____no_output_____" ], [ "df_s1 = reset_starting(df_single_stream)", "_____no_output_____" ], [ "df_s1", "_____no_output_____" ] ], [ [ "### running 2cke case", "_____no_output_____" ] ], [ [ "stream_num = 2\n\ndf_cke_list = []\nfor x in range(stream_num):\n df_cke_list.append(df_s1.copy(deep=True))", "_____no_output_____" ], [ "df_cke_list[0]", "_____no_output_____" ], [ "df_cke_list[1]", "_____no_output_____" ], [ "H2D_H2D_OVLP_TH = 3.158431\n\nfor i in range(1,stream_num):\n # compute the time for the init data transfer\n stream_startTime = find_whentostart_comingStream(df_cke_list[i-1], H2D_H2D_OVLP_TH)\n print('stream_startTime : {}'.format(stream_startTime))\n df_cke_list[i].start += stream_startTime\n df_cke_list[i].end += stream_startTime", "stream_startTime : 0.0341120000001\n" ], [ "df_cke_list[0]", "_____no_output_____" ], [ "df_cke_list[1]", "_____no_output_____" ] ], [ [ "### check whether there is h2d overlapping", "_____no_output_____" ] ], [ [ "prev_stm_h2ds_start, prev_stm_h2ds_end = find_h2ds_timing(df_cke_list[0])\nprint(\"prev stream h2ds : {} - {}\".format(prev_stm_h2ds_start, prev_stm_h2ds_end))\n\ncurr_stm_h2ds_start, curr_stm_h2ds_end = find_h2ds_timing(df_cke_list[1])\nprint(\"curr stream h2ds : {} - {}\".format(curr_stm_h2ds_start, curr_stm_h2ds_end))", "prev stream h2ds : 0.0 - 0.0341120000001\ncurr stream h2ds : 0.0341120000001 - 0.0682240000001\n" ], [ "if curr_stm_h2ds_start >=prev_stm_h2ds_start and curr_stm_h2ds_start < prev_stm_h2ds_end:\n h2ds_ovlp_between_stream = True \nelse:\n h2ds_ovlp_between_stream = False\n\nprint(\"h2ds_ovlp_between_stream : {}\".format(h2ds_ovlp_between_stream))", "h2ds_ovlp_between_stream : False\n" ] ], [ [ "### check kernel overlapping", "_____no_output_____" ] ], [ [ "prev_stm_kern_start, prev_stm_kern_end = find_kern_timing(df_cke_list[0])\nprint(\"prev stream kern : {} - {}\".format(prev_stm_kern_start, prev_stm_kern_end))\n\ncurr_stm_kern_start, curr_stm_kern_end = find_kern_timing(df_cke_list[1])\nprint(\"curr stream kern : {} - {}\".format(curr_stm_kern_start, curr_stm_kern_end))\n", "prev stream kern : 0.196001 - 0.257057\ncurr stream kern : 0.230113 - 0.291169\n" ], [ "if prev_stm_kern_start <= curr_stm_kern_start < prev_stm_kern_end:\n kern_ovlp_between_stream = True \nelse:\n kern_ovlp_between_stream = False\n\nprint(\"kern_ovlp_between_stream : {}\".format(kern_ovlp_between_stream))", "kern_ovlp_between_stream : True\n" ] ], [ [ "#### use cke model if kern_ovlp_between_stream is true", "_____no_output_____" ] ], [ [ "# get the overlapping kernel info from both stream\n\nkernel_ = model_cke_from_same_kernel(gtx950, df_trace, )", "_____no_output_____" 
] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d05784163859cc34f86910734bbb590f644dc55c
114,641
ipynb
Jupyter Notebook
sql_alchemy.ipynb
Yuva38/sqlalchemy-challenge
dfa6ef97b3f200f4b5b265bf9514be9f14803a26
[ "ADSL" ]
null
null
null
sql_alchemy.ipynb
Yuva38/sqlalchemy-challenge
dfa6ef97b3f200f4b5b265bf9514be9f14803a26
[ "ADSL" ]
null
null
null
sql_alchemy.ipynb
Yuva38/sqlalchemy-challenge
dfa6ef97b3f200f4b5b265bf9514be9f14803a26
[ "ADSL" ]
null
null
null
96.661889
55,236
0.824007
[ [ [ "%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ] ], [ [ "# Reflect Tables into SQLAlchemy ORM", "_____no_output_____" ] ], [ [ "# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func", "_____no_output_____" ], [ "engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")", "_____no_output_____" ], [ "# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)", "_____no_output_____" ], [ "# We can view all of the classes that automap found\nBase.classes.keys()", "_____no_output_____" ], [ "# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station", "_____no_output_____" ], [ "# Create our session (link) from Python to the DB\nsession = Session(engine)", "_____no_output_____" ] ], [ [ "# Exploratory Climate Analysis using pandas", "_____no_output_____" ] ], [ [ "# Design a query to retrieve the last 12 months of precipitation data and plot the results\n\n# Calculate the date 1 year ago from the last data point in the database\n\n# Perform a query to retrieve the data and precipitation scores\n\n# Save the query results as a Pandas DataFrame and set the index to the date column\n\n# Sort the dataframe by date\n\n# Use Pandas Plotting with Matplotlib to plot the data\n\nweather_data = pd.read_sql(\"SELECT * FROM measurement\", engine)\nweather_data.head()", "_____no_output_____" ], [ "# Latest Date\nlatest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date\nlatest_date", "_____no_output_____" ], [ "end_date = latest_date", "_____no_output_____" ], [ "end_date", "_____no_output_____" ], [ "start_date = dt.datetime.strptime(end_date, '%Y-%m-%d') - dt.timedelta(days=365)", "_____no_output_____" ], [ "start_date", "_____no_output_____" ], [ "start_date = start_date.strftime('%y-%m-%d')\nstart_date", "_____no_output_____" ], [ "start_date = \"2016-08-23\"\nend_date = \"2017-08-23\"\n\nweather_data_one_year = weather_data[weather_data[\"date\"].between(start_date, end_date)]\nweather_data_one_year.head()", "_____no_output_____" ], [ "len(weather_data_one_year)", "_____no_output_____" ], [ "precipitation_data = weather_data_one_year[[\"prcp\", \"date\"]]\nprecipitation_data.set_index('date', inplace=True)\n# Sort the dataframe by date\nprecipitation_data_sorted = precipitation_data.sort_values('date', ascending=True )\nprecipitation_data_sorted.head()", "_____no_output_____" ], [ "\n# Use Pandas Plotting with Matplotlib to plot the data\n# Rotate the xticks for the dates\nprecipitation_chart = precipitation_data_sorted.plot(kind = \"line\",grid=True, figsize=(10,6), rot=30, x_compat=True, fontsize=12, title = \"Precipitation data for one year\")\nprecipitation_chart.set_xlabel(\"Date\")\nprecipitation_chart.set_ylabel(\"Precipitation\")\nplt.show()", "_____no_output_____" ], [ "# Use Pandas to calcualte the summary statistics for the precipitation data\nprecipitation_data_sorted.describe()", "_____no_output_____" ], [ "# Design a query to show how many stations are available in this dataset?\n\nstation_data = pd.read_sql(\"SELECT * FROM station\", engine)\nstation_data\n", "_____no_output_____" 
], [ "station_data[\"station\"].count()", "_____no_output_____" ], [ "# What are the most active stations? (i.e. what stations have the most rows)?\n# List the stations and the counts in descending order.\nweather_data[\"station\"].value_counts()", "_____no_output_____" ], [ "weather_data_station_counts = weather_data[\"station\"].value_counts()", "_____no_output_____" ], [ "# The station with maximum number of temperature observations\nactive_station = weather_data_station_counts.index[0]\nactive_station", "_____no_output_____" ], [ "# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\nweather_data_active_station = weather_data.loc[(weather_data[\"station\"] == active_station), :]\nLowest_temperature = weather_data_active_station[\"tobs\"].min()\nHighest_temperature = weather_data_active_station[\"tobs\"].max()\nAverage_temperature = weather_data_active_station[\"tobs\"].mean()\nprint(f\"For the most active station The lowest temperature, The Highest temperature, The Average temperature is {Lowest_temperature} , {Highest_temperature}, {Average_temperature}\")\n", "For the most active station The lowest temperature, The Highest temperature, The Average temperature is 54.0 , 85.0, 71.66378066378067\n" ], [ "# Choose the station with the highest number of temperature observations.\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\nstart_date = \"2016-08-23\"\nend_date = \"2017-08-23\"\n\nweather_data_one_year = weather_data[weather_data[\"date\"].between(start_date, end_date)]\nweather_data_active_station_one_year = weather_data_one_year.loc[(weather_data_one_year[\"station\"] == active_station), :]\n\n", "_____no_output_____" ], [ "temperature_data = weather_data_active_station_one_year[[\"tobs\", \"date\"]]\nx_data = temperature_data[\"tobs\"]", "_____no_output_____" ], [ "plt.hist(x_data, 12, label = \"tobs\") \nplt.xlabel('Temperature')\nplt.ylabel('Frequency')\nplt.legend(loc=1, prop={'size': 14})\nplt.show()", "_____no_output_____" ], [ "# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' \n# and return the minimum, average, and maximum temperatures for that range of dates\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n# function usage example\nprint(calc_temps('2012-02-28', '2012-03-05'))", "[(62.0, 69.57142857142857, 74.0)]\n" ], [ "# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax \n# for your trip using the previous year's data for those same dates.\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= 
end_date).all()\nprint(calc_temps('2017-02-28', '2017-03-05'))", "[(64.0, 72.02777777777777, 78.0)]\n" ], [ "trip_results = calc_temps('2017-02-28', '2017-03-05')\ntrip_results", "_____no_output_____" ], [ "# Plot the results from your previous query as a bar chart. \n# Use \"Trip Avg Temp\" as your Title\n# Use the average temperature for the y value\n# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)\ntrip_df = pd.DataFrame(trip_results, columns=['Min Temp', 'Avg Temp', 'Max Temp'])\navg_temp = trip_df['Avg Temp']\nmin_max_temp = trip_df.iloc[0]['Max Temp'] - trip_df.iloc[0]['Min Temp']\ntemp_chart = avg_temp.plot(kind='bar', yerr=min_max_temp, grid = True, figsize=(6,8), alpha=0.5, color='coral')\ntemp_chart.set_title(\"Trip Avg Temp\", fontsize=20)\ntemp_chart.set_ylabel(\"Temp (F)\")\nplt.xticks([])\nplt.show()\n", "_____no_output_____" ], [ "# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.\n# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation\ntrip_start_date = \"2017-02-28\"\ntrip_end_date = \"2017-03-5\"\n\nweather_data_one_year_trip = weather_data_one_year[weather_data_one_year[\"date\"].between(trip_start_date, trip_end_date)]\nweather_data_one_year_trip_per_station = weather_data_one_year_trip.groupby(\"station\")\nweather_data_one_year_trip_per_station[\"prcp\"].sum()\n\n", "_____no_output_____" ] ], [ [ "## Optional Challenge Assignment", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d057a9fef92c36f8b2e3187fe1108e0db7938b3f
310,799
ipynb
Jupyter Notebook
Time Series Analysis/Time Series Forecasting - Autoregression (AR)/Autoregression (AR).ipynb
shreejitverma/Data-Scientist
03c06936e957f93182bb18362b01383e5775ffb1
[ "MIT" ]
2
2022-03-12T04:53:03.000Z
2022-03-27T12:39:21.000Z
Time Series Analysis/Time Series Forecasting - Autoregression (AR)/Autoregression (AR).ipynb
shreejitverma/Data-Scientist
03c06936e957f93182bb18362b01383e5775ffb1
[ "MIT" ]
null
null
null
Time Series Analysis/Time Series Forecasting - Autoregression (AR)/Autoregression (AR).ipynb
shreejitverma/Data-Scientist
03c06936e957f93182bb18362b01383e5775ffb1
[ "MIT" ]
2
2022-03-12T04:52:21.000Z
2022-03-27T12:45:32.000Z
607.029297
103,888
0.950425
[ [ [ "# Tutorial - Time Series Forecasting - Autoregression (AR)\n\nThe goal is to forecast time series with the Autoregression (AR) Approach. 1) JetRail Commuter, 2) Air Passengers, 3) Function Autoregression with Air Passengers, and 5) Function Autoregression with Wine Sales.\n\nReferences Jason Brownlee - https://machinelearningmastery.com/time-series-forecasting-methods-in-python-cheat-sheet/", "_____no_output_____" ] ], [ [ "import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nimport datetime\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "# Load File\nurl = 'https://raw.githubusercontent.com/tristanga/Machine-Learning/master/Data/JetRail%20Avg%20Hourly%20Traffic%20Data%20-%202012-2013.csv'\ndf = pd.read_csv(url)\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 494 entries, 0 to 493\nData columns (total 3 columns):\nDatetime 494 non-null object\nID 494 non-null float64\nCount 494 non-null float64\ndtypes: float64(2), object(1)\nmemory usage: 11.7+ KB\n" ], [ "df.Datetime = pd.to_datetime(df.Datetime,format='%Y-%m-%d %H:%M') \ndf.index = df.Datetime", "_____no_output_____" ] ], [ [ "# Autoregression (AR) Approach with JetRail \n\nThe autoregression (AR) method models the next step in the sequence as a linear function of the observations at prior time steps.\n\nThe notation for the model involves specifying the order of the model p as a parameter to the AR function, e.g. AR(p). For example, AR(1) is a first-order autoregression model.\n\nThe method is suitable for univariate time series without trend and seasonal components.", "_____no_output_____" ] ], [ [ "#Split Train Test\nimport math\ntotal_size=len(df)\nsplit = 10392 / 11856\ntrain_size=math.floor(split*total_size)\ntrain=df.head(train_size)\ntest=df.tail(len(df) -train_size)", "_____no_output_____" ], [ "from statsmodels.tsa.ar_model import AR\nmodel = AR(train.Count)\nfit1 = model.fit()\ny_hat = test.copy()\ny_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)", "_____no_output_____" ], [ "#Plotting data\nplt.figure(figsize=(12,8))\nplt.plot(train.index, train['Count'], label='Train')\nplt.plot(test.index,test['Count'], label='Test')\nplt.plot(y_hat.index,y_hat['AR'], label='AR')\nplt.legend(loc='best')\nplt.title(\"Autoregression (AR) Forecast\")\nplt.show()", "_____no_output_____" ] ], [ [ "# RMSE Calculation", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error\nfrom math import sqrt\nrms = sqrt(mean_squared_error(test.Count, y_hat.AR))\nprint('RMSE = '+str(rms))", "RMSE = 28.635096626807453\n" ] ], [ [ "# Autoregression (AR) Approach with Air Passagers ", "_____no_output_____" ] ], [ [ "# Subsetting\nurl = 'https://raw.githubusercontent.com/tristanga/Machine-Learning/master/Data/International%20Airline%20Passengers.csv'\ndf = pd.read_csv(url, sep =\";\")\ndf.info()\ndf.Month = pd.to_datetime(df.Month,format='%Y-%m')\ndf.index = df.Month\n#df.head()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 144 entries, 0 to 143\nData columns (total 2 columns):\nMonth 144 non-null object\nPassengers 144 non-null int64\ndtypes: int64(1), object(1)\nmemory usage: 2.3+ KB\n" ], [ "#Creating train and test set \nimport math\ntotal_size=len(df)\ntrain_size=math.floor(0.7*total_size) #(70% Dataset)\ntrain=df.head(train_size)\ntest=df.tail(len(df) -train_size)\n#train.info()\n#test.info()", "_____no_output_____" ], [ "from statsmodels.tsa.ar_model import AR\n# Create prediction table\ny_hat = 
test.copy()\nmodel = AR(train['Passengers'])\nfit1 = model.fit()\ny_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)\ny_hat.describe()", "_____no_output_____" ], [ "plt.figure(figsize=(12,8))\nplt.plot(train.index, train['Passengers'], label='Train')\nplt.plot(test.index,test['Passengers'], label='Test')\nplt.plot(y_hat.index,y_hat['AR'], label='AR')\nplt.legend(loc='best')\nplt.title(\"Autoregression (AR)\")\nplt.show()", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error\nfrom math import sqrt\nrms = sqrt(mean_squared_error(test.Passengers, y_hat.AR))\nprint('RMSE = '+str(rms))", "RMSE = 60.13838110500644\n" ] ], [ [ "# Function Autoregression (AR) Approach with variables", "_____no_output_____" ] ], [ [ "def AR_forecasting(mydf,colval,split):\n #print(split)\n import math\n from statsmodels.tsa.api import Holt\n from sklearn.metrics import mean_squared_error\n from math import sqrt\n global y_hat, train, test\n total_size=len(mydf)\n train_size=math.floor(split*total_size) #(70% Dataset)\n train=mydf.head(train_size)\n test=mydf.tail(len(mydf) -train_size)\n y_hat = test.copy()\n model = AR(train[colval])\n fit1 = model.fit()\n y_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)\n plt.figure(figsize=(12,8))\n plt.plot(train.index, train[colval], label='Train')\n plt.plot(test.index,test[colval], label='Test')\n plt.plot(y_hat.index,y_hat['AR'], label='AR')\n plt.legend(loc='best')\n plt.title(\"Autoregression (AR) Forecast\")\n plt.show()\n rms = sqrt(mean_squared_error(test[colval], y_hat.AR))\n print('RMSE = '+str(rms))", "_____no_output_____" ], [ "AR_forecasting(df,'Passengers',0.7)", "_____no_output_____" ] ], [ [ "# Testing Function Autoregression (AR) Approach with Wine Dataset ", "_____no_output_____" ] ], [ [ "url = 'https://raw.githubusercontent.com/tristanga/Data-Cleaning/master/Converting%20Time%20Series/Wine_Sales_R_Dataset.csv'\ndf = pd.read_csv(url)\ndf.info()\ndf.Date = pd.to_datetime(df.Date,format='%Y-%m-%d')\ndf.index = df.Date", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 176 entries, 0 to 175\nData columns (total 2 columns):\nDate 176 non-null object\nSales 176 non-null int64\ndtypes: int64(1), object(1)\nmemory usage: 2.8+ KB\n" ], [ "AR_forecasting(df,'Sales',0.7)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d057cbef9eaafa5143fcd75cf20634c2a5c08bcb
11,679
ipynb
Jupyter Notebook
numpy.ipynb
OmidMustafa/XOR_python
274b159ad3ace0330af943b0a151f1da8a9417b9
[ "MIT" ]
null
null
null
numpy.ipynb
OmidMustafa/XOR_python
274b159ad3ace0330af943b0a151f1da8a9417b9
[ "MIT" ]
null
null
null
numpy.ipynb
OmidMustafa/XOR_python
274b159ad3ace0330af943b0a151f1da8a9417b9
[ "MIT" ]
null
null
null
24.382046
224
0.315866
[ [ [ "<a href=\"https://colab.research.google.com/github/OmidMustafa/XOR_python/blob/main/numpy.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "# Vector 1-D array\r\na = [1,2,3] \r\n\r\na = a + [1]\r\n\r\nprint(a)\r\n\r\n# Numpy array 1-D \r\nb = np.array([4,5,6])\r\n\r\nb = np.append(b,[7])\r\n\r\n", "[1, 2, 3, 1]\n" ], [ "A = np.array([[1,22,3],[4,5,6],[111,-11,33]])\r\nB = np.array([[10,11,12],[13,14,15],[14,7,2.5]])\r\n\r\nA.shape", "_____no_output_____" ], [ "\r\nsum = np.sum(np.dot(A,B))\r\nprint(sum)\r\nsum.dtype", "5487.0\n" ], [ "C = np.array([[10,11,12],[13,14,15],[16,17,18]])\r\n\r\nC\r\nC.shape", "_____no_output_____" ], [ "K = np.array([[1,2,3],[1,2,3],[2,3,5]])\r\n\r\nK.ndim", "_____no_output_____" ], [ "F = np.random.random(size=5)\r\nF", "_____no_output_____" ], [ "L = np.random.rand(4,4,4)\r\nL\r\nL.shape", "_____no_output_____" ], [ "U = np.random.uniform(4.3,5.3,3)\r\nprint(U)\r\nU.shape", "[4.94584775 5.27103662 4.41071922]\n" ], [ "Z_3D = np.array([\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ],\r\n\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]],\r\n\r\n [[1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ],\r\n\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ]\r\n ]\r\n )\r\nprint(Z_3D)\r\nprint(\"Number of Dimensions\",Z_3D.ndim)\r\nprint(\"Size of Array\",Z_3D.size)", "[[[1 2 3]\n [4 5 6]\n [7 8 9]]\n\n [[1 2 3]\n [4 5 6]\n [7 8 9]]\n\n [[1 2 3]\n [4 5 6]\n [7 8 9]]\n\n [[1 2 3]\n [4 5 6]\n [7 8 9]]]\nNumber of Dimensions 3\nSize of Array 36\n" ], [ "B = np.array([[[\r\n [1,2,3],[1,2,3]],\r\n [[1,2,3],[1,2,3]],\r\n [[1,2,3],[1,2,3]]\r\n ]])\r\nprint(B)\r\nprint(B.ndim)\r\nB.shape", "[[[[1 2 3]\n [1 2 3]]\n\n [[1 2 3]\n [1 2 3]]\n\n [[1 2 3]\n [1 2 3]]]]\n4\n" ], [ "Z_3D = np.zeros_like([\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ],\r\n\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]],\r\n\r\n [[1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ],\r\n\r\n [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,9]\r\n ]\r\n ]\r\n )\r\nprint(Z_3D)\r\nprint(\"Number of Dimensions \",Z_3D.ndim)\r\nprint(\"Size of Array\",Z_3D.size)", "[[[0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]]]\nNumber of Dimensions 3\nSize of Array 36\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d057db3337695d426e489c5fba6f4e6afb0a3f08
3,299
ipynb
Jupyter Notebook
StringsFatiamento.ipynb
laercio-carvalho/learning-python
880e4d92e5047df72ead76c5bf69dc9ac6033410
[ "BSD-3-Clause" ]
null
null
null
StringsFatiamento.ipynb
laercio-carvalho/learning-python
880e4d92e5047df72ead76c5bf69dc9ac6033410
[ "BSD-3-Clause" ]
null
null
null
StringsFatiamento.ipynb
laercio-carvalho/learning-python
880e4d92e5047df72ead76c5bf69dc9ac6033410
[ "BSD-3-Clause" ]
null
null
null
18.430168
99
0.476205
[ [ [ "\"dinheiro\" < \"felicidade\" # retorna True", "_____no_output_____" ], [ "\"sono\" > \"vontade\" # retorna False", "_____no_output_____" ], [ "primeira = \"uma string\"\nsegunda = \" outra string e o numero: \"\nnumero01 = 72\nnumero02 = 17\nprimeira + segunda + str(numero01 + numero02)", "_____no_output_____" ], [ "hello = \"Olá, mundo!\"", "_____no_output_____" ], [ "print(hello)\n#Extrai os 5 ultimos caractéres limitando dos 11, da direita pra esquerda, de 11 possíveis\nprint(hello[-4:11])\n#Apaga os 4 primeios caracteres dos 11 possíveis\nprint(hello[4:11])\n\n#Mostra somente os 3 primeios caracteres dos 11 possíveis\nprint(hello[0:3])\n#Apaga somente os 3 ultimos caracteres dos 11 possíveis\nprint(hello[0:-3])", "Olá, mundo!\nndo!\n mundo!\nOlá\nOlá, mun\n" ], [ "x = 'A linguagem Python é ótima'\nprint(x[12:-8])", "Python\n" ], [ "print(x[-1])", "a\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d057e65e165ef2d07e4655f06ed572a1d1e69e01
12,694
ipynb
Jupyter Notebook
site/it/tutorials/quickstart/advanced.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
site/it/tutorials/quickstart/advanced.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
site/it/tutorials/quickstart/advanced.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
31.814536
347
0.5219
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Introduzione a TensorFlow 2 per esperti", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/quickstart/advanced\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />Visualizza su TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/it/tutorials/quickstart/advanced.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Esegui in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/it/tutorials/quickstart/advanced.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />Visualizza il sorgente su GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/it/tutorials/quickstart/advanced.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Scarica il notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: La nostra comunità di Tensorflow ha tradotto questi documenti. Poichè queste traduzioni sono *best-effort*, non è garantito che rispecchino in maniera precisa e aggiornata la [documentazione ufficiale in inglese](https://www.tensorflow.org/?hl=en). \nSe avete suggerimenti per migliorare questa traduzione, mandate per favore una pull request al repository Github [tensorflow/docs](https://github.com/tensorflow/docs). \nPer proporsi come volontari alla scrittura o alla review delle traduzioni della comunità contattate la \n[mailing list [email protected]](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).", "_____no_output_____" ], [ "Questo è un [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) notebook file. I programmi Python sono eseguiti direttamente nel browser—un ottimo modo per imparare e utilizzare TensorFlow. Per seguire questo tutorial, esegui il file notebook in Google Colab cliccando sul bottone in cima a questa pagina.\n\n1. All'interno di Colab, connettiti al runtime di Python: In alto a destra della barra dei menu, seleziona *CONNECT*.\n2. 
Esegui tutte le celle di codice di notebook: Seleziona *Runtime* > *Run all*.", "_____no_output_____" ], [ "Scarica e installa il pacchetto TensorFlow 2:", "_____no_output_____" ], [ "Importa TensorFlow nel tuo codice:", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D\nfrom tensorflow.keras import Model", "_____no_output_____" ] ], [ [ "Carica e prepara il [dataset MNIST](http://yann.lecun.com/exdb/mnist/).", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Add a channels dimension\nx_train = x_train[..., tf.newaxis]\nx_test = x_test[..., tf.newaxis]", "_____no_output_____" ] ], [ [ "Usa `tf.data` per raggruppare e mischiare il dataset:", "_____no_output_____" ] ], [ [ "train_ds = tf.data.Dataset.from_tensor_slices(\n (x_train, y_train)).shuffle(10000).batch(32)\n\ntest_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)", "_____no_output_____" ] ], [ [ "Costrusci il modello `tf.keras` usando l'[API Keras per creare sottoclassi di modelli](https://www.tensorflow.org/guide/keras#model_subclassing):", "_____no_output_____" ] ], [ [ "class MyModel(Model):\n def __init__(self):\n super(MyModel, self).__init__()\n self.conv1 = Conv2D(32, 3, activation='relu')\n self.flatten = Flatten()\n self.d1 = Dense(128, activation='relu')\n self.d2 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.conv1(x)\n x = self.flatten(x)\n x = self.d1(x)\n return self.d2(x)\n\n# Create an instance of the model\nmodel = MyModel()", "_____no_output_____" ] ], [ [ "Scegli un metodo di ottimizzazione e una funzione obiettivo per l'addestramento:", "_____no_output_____" ] ], [ [ "loss_object = tf.keras.losses.SparseCategoricalCrossentropy()\n\noptimizer = tf.keras.optimizers.Adam()", "_____no_output_____" ] ], [ [ "Seleziona delle metriche per misurare la pertita e l'accuratezza del modello. 
Queste metriche accumulano i valori alle varie epoche e alla fine stampano il risultato globale.", "_____no_output_____" ] ], [ [ "train_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\ntest_loss = tf.keras.metrics.Mean(name='test_loss')\ntest_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')", "_____no_output_____" ] ], [ [ "Usa `tf.GradientTape` per addestrare il modello:", "_____no_output_____" ] ], [ [ "@tf.function\ndef train_step(images, labels):\n with tf.GradientTape() as tape:\n predictions = model(images)\n loss = loss_object(labels, predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)", "_____no_output_____" ] ], [ [ "Testa il modello:", "_____no_output_____" ] ], [ [ "@tf.function\ndef test_step(images, labels):\n predictions = model(images)\n t_loss = loss_object(labels, predictions)\n\n test_loss(t_loss)\n test_accuracy(labels, predictions)", "_____no_output_____" ], [ "EPOCHS = 5\n\nfor epoch in range(EPOCHS):\n for images, labels in train_ds:\n train_step(images, labels)\n\n for test_images, test_labels in test_ds:\n test_step(test_images, test_labels)\n\n template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'\n print(template.format(epoch+1,\n train_loss.result(),\n train_accuracy.result()*100,\n test_loss.result(),\n test_accuracy.result()*100))\n\n # Reset the metrics for the next epoch\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()", "_____no_output_____" ] ], [ [ "Il classificatore di immagini è ora addestrato per circa il 98% di accuratezza su questo insieme di dati. Per approfondire, leggi i [tutorials di TensorFlow](https://www.tensorflow.org/tutorials).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d057eedb9058f81221bf706ba1f43b9e05f337f7
228,675
ipynb
Jupyter Notebook
experiment_3.ipynb
Tirami-su/rolemagnet
c861f6a7918bbae76667ca98651d045639eacd04
[ "MIT" ]
null
null
null
experiment_3.ipynb
Tirami-su/rolemagnet
c861f6a7918bbae76667ca98651d045639eacd04
[ "MIT" ]
null
null
null
experiment_3.ipynb
Tirami-su/rolemagnet
c861f6a7918bbae76667ca98651d045639eacd04
[ "MIT" ]
null
null
null
670.601173
113,876
0.946295
[ [ [ "用带有三种类型噪声(度,边权重,点权重)的传销模型网络测试RoleMagnet的抗噪性", "_____no_output_____" ] ], [ [ "import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Creating a graph\n模拟23人的小型传销组织,带少量噪声", "_____no_output_____" ] ], [ [ "%matplotlib inline\nplt.rcParams['figure.dpi'] = 150\nplt.rcParams['figure.figsize'] = (4, 3)\n\nG = nx.DiGraph()\nG.add_weighted_edges_from([('11','s1',0.07),('12','s1',0.1),('13','s1',0.06),('14','s1',0.09),('15','s1',0.08),\n ('21','s2',0.07),('22','s2',0.1),('23','s2',0.06),('24','s2',0.09),('25','s2',0.08),('26','s2',0.1),\n ('31','s3',0.1),('32','s3',0.1),('33','s3',0.1),('34','s3',0.1),('35','s3',0.1),('36','s3',0.1),\n ('s1','mid',0.4),('s2','mid',0.5),('s3','mid',0.55),\n ('mid','boss',0.7),('mid','w1',0.72),\n ('w1','41',0.065),('w1','42',0.05),('w1','43',0.06),('w1','44',0.055),('w1','51',0.24),('w1','52',0.25)])\n# 净获利\nbalance=[-0.07,0,-0.1,-0.06,-0.09,-0.08,\n -0.07,0,-0.1,-0.06,-0.09,-0.08,-0.1,\n -0.1,0.05,-0.1,-0.1,-0.1,-0.1,-0.1,\n 0.03,0.7,0,\n 0.065,0.05,0.06,0.055,0.24,0.25]\ncolor=['lightgray','violet','lightgray','lightgray','lightgray','lightgray',\n 'lightgray','violet','lightgray','lightgray','lightgray','lightgray','lightgray',\n 'lightgray','violet','lightgray','lightgray','lightgray','lightgray','lightgray',\n 'orange','r','limegreen',\n 'c','c','c','c','pink','pink']\nnx.draw_planar(G, with_labels=True, node_color=color, node_size=300, font_size=7)\nplt.show()", "_____no_output_____" ] ], [ [ "## RoleMagnet", "_____no_output_____" ] ], [ [ "import rolemagnet as rm\nvec,role,label=rm.role_magnet(G, balance=balance)", "Embedding: 100.00% -\nSOM shape: [11, 7]\nTraining SOM: 145\n" ] ], [ [ "## Visualization\n可视化节点的向量表示,用PCA降到二维后再次可视化", "_____no_output_____" ] ], [ [ "print ('三维嵌入结果')\nfor i in range(len(G.nodes)):\n print (list(G.nodes)[i],'\\t',vec[i])\n \nfrom mpl_toolkits.mplot3d import Axes3D\ncoord = np.transpose(vec)\nfig = plt.figure(figsize=(4,3))\nax = Axes3D(fig)\nax.scatter(coord[0], coord[1], coord[2], c=color, s=150)\nplt.show()\n\n# 再次降到二维\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\nreduced=PCA(n_components=2).fit_transform(StandardScaler().fit_transform(vec))\nprint ('二维嵌入结果')\nfor i in range(len(G.nodes)):\n print (list(G.nodes)[i],'\\t',reduced[i])\n\ncoord = np.transpose(reduced)\nplt.scatter(coord[0], coord[1], c=color, s=150, linewidths=0.8, edgecolors='k')\nplt.title(\"RoleMagnet\")\nplt.show()", "三维嵌入结果\n11 \t [-4.656918 2.50780243 -2.60384377]\ns1 \t [13.6955635 -7.36617524 0. ]\n12 \t [-3.4945784 1.04689359 -3.71977681]\n13 \t [-4.72707781 3.05246777 -2.23186608]\n14 \t [-3.95635282 1.50288243 -3.34779913]\n15 \t [-4.37087035 1.9886804 -2.97582145]\n21 \t [-4.5643264 3.33294681 -2.60384377]\ns2 \t [ 17.36800426 -11.11766841 0. ]\n22 \t [-4.23934001 2.05242074 -3.71977681]\n23 \t [-4.37037584 3.68885727 -2.23186608]\n24 \t [-4.48608341 2.46724424 -3.34779913]\n25 \t [-4.60589919 2.90127084 -2.97582145]\n26 \t [-4.23934001 2.05242074 -3.71977681]\n31 \t [-4.41890655 2.45218286 -3.71977681]\ns3 \t [ 18.0140488 -11.44197277 1.8598884 ]\n32 \t [-4.41890655 2.45218286 -3.71977681]\n33 \t [-4.41890655 2.45218286 -3.71977681]\n34 \t [-4.41890655 2.45218286 -3.71977681]\n35 \t [-4.41890655 2.45218286 -3.71977681]\n36 \t [-4.41890655 2.45218286 -3.71977681]\nmid \t [29.54709281 2.7854159 1.11593304]\nboss \t [-0.24243254 -5.20282019 26.03843765]\nw1 \t [16.38327335 28.67939275 0. 
]\n41 \t [-3.55963367 -6.53538914 2.41785492]\n42 \t [-3.69861603 -6.33304626 1.8598884 ]\n43 \t [-3.59066646 -6.48454517 2.23186608]\n44 \t [-3.63486767 -6.41754297 2.04587724]\n51 \t [-3.02674862 -5.96176481 8.92746434]\n52 \t [-3.03041617 -5.91086811 9.29944202]\n" ] ], [ [ "## Evaluation\n用 Adjusted Rand Index 和 V-Measure 两种指标评价聚类结果", "_____no_output_____" ] ], [ [ "from sklearn.metrics.cluster import adjusted_rand_score, homogeneity_completeness_v_measure\n\ntrue_label=[1,2,1,1,1,1,\n 1,2,1,1,1,1,1,\n 1,2,1,1,1,1,1,\n 3,4,5,6,6,6,6,7,7]\n\nprint('Adjusted Rand Index:',adjusted_rand_score(true_label,label))\nprint('V-Measure:',homogeneity_completeness_v_measure(true_label,label))\nprint('\\n聚类结果')\nfor k,v in role.items():\n print(k,v[0])\n for i in v[1]:\n print(' ',list(G.nodes)[i])", "Adjusted Rand Index: 0.9892723141150981\nV-Measure: (1.0, 0.9536171907216509, 0.9762579846765088)\n\n聚类结果\n21 [-0.6 -0.4]\n 11\n 12\n 13\n 14\n 15\n 21\n 22\n 24\n 23\n 25\n 26\n 31\n 32\n 33\n 34\n 35\n 36\n45 [1.2 1.2]\n s1\n59 [1.6 1.2]\n s2\n s3\n41 [0.6 3.2]\n mid\n71 [ 3.6 -0.2]\n boss\n6 [-2.2 3.2]\n w1\n35 [ 0.8 -0.6]\n 41\n 43\n 44\n 42\n49 [ 1.6 -0.6]\n 51\n 52\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d058062fbabb535bec72faa90777b992c6b0ef17
2,131
ipynb
Jupyter Notebook
Euler 014 - Longest Collatz Sequence.ipynb
Radcliffe/project-euler
5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38
[ "MIT" ]
6
2016-05-11T18:55:35.000Z
2019-12-27T21:38:43.000Z
Euler 014 - Longest Collatz Sequence.ipynb
Radcliffe/project-euler
5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38
[ "MIT" ]
null
null
null
Euler 014 - Longest Collatz Sequence.ipynb
Radcliffe/project-euler
5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38
[ "MIT" ]
null
null
null
23.163043
205
0.487095
[ [ [ "Euler Problem 14\n================\n\nThe following iterative sequence is defined for the set of positive integers:\n\n n → n/2 (n is even)\n n → 3n + 1 (n is odd)\n\nUsing the rule above and starting with 13, we generate the following sequence:\n\n 13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1\n\nIt can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.\n\nWhich starting number, under one million, produces the longest chain?\n\nNOTE: Once the chain starts the terms are allowed to go above one million.", "_____no_output_____" ] ], [ [ "D = {1:0}\nmaxlen = 0\nstart = 1\n\ndef collatz(n):\n if n in D:\n return D[n]\n elif (n % 2):\n c = 1 + collatz(3*n+1)\n else:\n c = 1 + collatz(n/2)\n D[n] = c\n return c\n\nfor n in range(1,1000000):\n c = collatz(n)\n if c > maxlen:\n maxlen = c\n start = n\nprint(start)", "837799\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d0580f43c3619fb17857ab348d02b578c8563f01
10,861
ipynb
Jupyter Notebook
FormacaoPythonParaDataScience/PythonPandas-TratandoAnalisandoDados/CursoPandas/SelecoesFrequencias.ipynb
anablima/TreinamentosAlura
1a512f80daf6b6874ccd036736679c3982bcbccc
[ "MIT" ]
1
2022-02-04T10:36:22.000Z
2022-02-04T10:36:22.000Z
FormacaoPythonParaDataScience/PythonPandas-TratandoAnalisandoDados/CursoPandas/SelecoesFrequencias.ipynb
anablima/TreinamentosAlura
1a512f80daf6b6874ccd036736679c3982bcbccc
[ "MIT" ]
null
null
null
FormacaoPythonParaDataScience/PythonPandas-TratandoAnalisandoDados/CursoPandas/SelecoesFrequencias.ipynb
anablima/TreinamentosAlura
1a512f80daf6b6874ccd036736679c3982bcbccc
[ "MIT" ]
null
null
null
28.732804
123
0.370224
[ [ [ "# Relatório de Análise IV", "_____no_output_____" ], [ "## Seleções e Frequências", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "dados = pd.read_csv('dados/aluguel_residencial.csv', sep = ';')", "_____no_output_____" ], [ "dados.head(10)", "_____no_output_____" ], [ "# Selecione somente os imóveis classificados com tipo 'Apartamento'\nselecao = dados['Tipo'] == 'Apartamento'\nn1 = dados[selecao].shape[0]\nn1", "_____no_output_____" ], [ "# Selecione os imóveis classificados com tipos 'Casa', 'Casa de Condomínio' e 'Casa de Vila'\nselecao = (dados['Tipo'] == 'Casa') | (dados['Tipo'] == 'Casa de Condomínio') | (dados['Tipo'] == 'Casa de Vila')\nn2 = dados[selecao].shape[0]\nn2", "_____no_output_____" ], [ "# Selecione os imóveis com área entre 60 e 100 metros quadrados, incluindo os limites\n# 60 <= Area <= 100\nselecao = (dados['Area'] >= 60) & (dados['Area'] <= 100)\nn3 = dados[selecao].shape[0]\nn3", "_____no_output_____" ], [ "# Selecione os imóveis que tenham pelo menos 4 quartos e aluguel menor que R$ 2.000,00\nselecao = (dados['Quartos'] >= 4) & (dados['Valor'] < 2000)\nn4 = dados[selecao].shape[0]\nn4", "_____no_output_____" ], [ "print(\"Nº de imóveis classificados com tipo 'Apartamento' -> {}\".format(n1))\nprint(\"Nº de imóveis classificados com tipos 'Casa', 'Casa de Condomínio' e 'Casa de Vila' -> {}\".format(n2))\nprint(\"Nº de imóveis com área entre 60 e 100 metros quadrados, incluindo os limites -> {}\".format(n3))\nprint(\"Nº de imóveis que tenham pelo menos 4 quartos e aluguel menor que R$ 2.000,00 -> {}\".format(n4))", "Nº de imóveis classificados com tipo 'Apartamento' -> 19532\nNº de imóveis classificados com tipos 'Casa', 'Casa de Condomínio' e 'Casa de Vila' -> 2212\nNº de imóveis com área entre 60 e 100 metros quadrados, incluindo os limites -> 8719\nNº de imóveis que tenham pelo menos 4 quartos e aluguel menor que R$ 2.000,00 -> 41\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d058156f43016d15ecfa72704691195db5ab00cd
4,467
ipynb
Jupyter Notebook
jupyterhub/notebooks/gpu/10_Optimize_Model_Server.ipynb
just4jc/pipeline
3c7a4fa59c6363833766d2b55fa55ace6b6af351
[ "Apache-2.0" ]
1
2018-03-13T09:46:17.000Z
2018-03-13T09:46:17.000Z
jupyterhub/notebooks/gpu/10_Optimize_Model_Server.ipynb
just4jc/pipeline
3c7a4fa59c6363833766d2b55fa55ace6b6af351
[ "Apache-2.0" ]
null
null
null
jupyterhub/notebooks/gpu/10_Optimize_Model_Server.ipynb
just4jc/pipeline
3c7a4fa59c6363833766d2b55fa55ace6b6af351
[ "Apache-2.0" ]
2
2018-08-19T15:05:18.000Z
2020-08-13T16:31:48.000Z
33.088889
263
0.610925
[ [ [ "# Tune TensorFlow Serving\n## Guidelines\n### CPU-only\nIf your system is CPU-only (no GPU), then consider the following values: \n* `num_batch_threads` equal to the number of CPU cores\n* `max_batch_size` to infinity (ie. MAX_INT)\n* `batch_timeout_micros` to 0. \n\nThen experiment with batch_timeout_micros values in the 1-10 millisecond (1000-10000 microsecond) range, while keeping in mind that 0 may be the optimal value.\n\n### GPU\n\nIf your model uses a GPU device for part or all of your its inference work, consider the following value:\n\n* `num_batch_threads` to the number of CPU cores.\n* `batch_timeout_micros` to infinity while tuning `max_batch_size` to achieve the desired balance between throughput and average latency. Consider values in the hundreds or thousands.\n\nFor online serving, tune `batch_timeout_micros` to rein in tail latency. \n\nThe idea is that batches normally get filled to max_batch_size, but occasionally when there is a lapse in incoming requests, to avoid introducing a latency spike it makes sense to process whatever's in the queue even if it represents an underfull batch. \n\nThe best value for `batch_timeout_micros` is typically a few milliseconds, and depends on your context and goals. \n\nZero is a value to consider as it works well for some workloads. For bulk-processing batch jobs, choose a large value, perhaps a few seconds, to ensure good throughput but not wait too long for the final (and likely underfull) batch.", "_____no_output_____" ], [ "## Close TensorFlow Serving and Load Test Terminals", "_____no_output_____" ], [ "## Open a Terminal through Jupyter Notebook \n### (Menu Bar -> File -> New...)\n![Jupyter Terminal](http://pipeline.io/img/jupyter-terminal.png)\n", "_____no_output_____" ], [ "## Enable Request Batching", "_____no_output_____" ], [ "## Start TensorFlow Serving in Separate Terminal\nThe params are as follows:\n* `port` for TensorFlow Serving (int)\n* `model_name` (anything)\n* `model_base_path` (/path/to/model/ above all versioned sub-directories)\n* `enable_batching` (true|false)\n\n\n```\ntensorflow_model_server \\\n --port=9000 \\\n --model_name=linear \\\n --model_base_path=/root/models/linear_fully_optimized/cpu \\\n --batching_parameters_file=/root/config/tf_serving/batch_config.txt \\ \n --enable_batching=true \\\n```\n\n### `batch_config.txt`\n* `num_batch_threads` (usually equal to the number of CPU cores or a multiple thereof)\n* `max_batch_size` (# of requests - start with infinity, tune down to find the right balance between latency and throughput)\n* `batch_timeout_micros` (minimum batch window duration)\n```\nnum_batch_threads { value: 100 }\nmax_batch_size { value: 99999999 }\nbatch_timeout_micros { value: 100000 }\n```\n\n", "_____no_output_____" ], [ "## Start Load Test in the Terminal\n```\nloadtest high\n```\n\nNotice the throughput and avg/min/max latencies:\n```\nsummary ... = 301.1/s Avg: 227 Min: 3 Max: 456 Err: 0 (0.00%)\n```", "_____no_output_____" ], [ "## Modify Request Batching Parameters, Repeat Load Test\nGain intuition on the performance impact of changing the request batching parameters.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0581821b89eda9e359cc4763d39dd5e859faa58
109,085
ipynb
Jupyter Notebook
immune_CD45enriched_load_detect_doublets.ipynb
ventolab/HGDA
baacdf627f1c5fdd4712db1c98d94ab175e33fdf
[ "MIT" ]
null
null
null
immune_CD45enriched_load_detect_doublets.ipynb
ventolab/HGDA
baacdf627f1c5fdd4712db1c98d94ab175e33fdf
[ "MIT" ]
null
null
null
immune_CD45enriched_load_detect_doublets.ipynb
ventolab/HGDA
baacdf627f1c5fdd4712db1c98d94ab175e33fdf
[ "MIT" ]
null
null
null
80.26858
232
0.697731
[ [ [ "import scrublet as scr\nimport numpy as np\nimport pandas as pd\nimport scanpy as sc\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport scipy\n\n\ndef MovePlots(plotpattern, subplotdir):\n os.system('mkdir -p '+str(sc.settings.figdir)+'/'+subplotdir)\n os.system('mv '+str(sc.settings.figdir)+'/*'+plotpattern+'** '+str(sc.settings.figdir)+'/'+subplotdir)\n\n\nsc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)\nsc.settings.figdir = './final-figures/'\nsc.logging.print_versions()\nsc.settings.set_figure_params(dpi=80) # low dpi (dots per inch) yields small inline figures\n\nsys.executable", "WARNING: If you miss a compact list, please try `print_header`!\n" ], [ "# Benjamini-Hochberg and Bonferroni FDR helper functions.\n\ndef bh(pvalues):\n \"\"\"\n Computes the Benjamini-Hochberg FDR correction.\n \n Input:\n * pvals - vector of p-values to correct\n \"\"\"\n pvalues = np.array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = np.empty(n)\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues\n\ndef bonf(pvalues):\n \"\"\"\n Computes the Bonferroni FDR correction.\n \n Input:\n * pvals - vector of p-values to correct\n \"\"\"\n new_pvalues = np.array(pvalues) * len(pvalues)\n new_pvalues[new_pvalues>1] = 1\n return new_pvalues", "_____no_output_____" ] ], [ [ "Scrumblet\n(Courtesy of K Polansky)\n\nTwo-step doublet score processing, mirroring the approach from Popescu et al. https://www.nature.com/articles/s41586-019-1652-y which was closely based on Pijuan-Sala et al. https://www.nature.com/articles/s41586-019-0933-9\n\nThe first step starts with some sort of doublet score, e.g. Scrublet, and ends up with a per-cell p-value (with significant values marking doublets). 
For each sample individually:\n\nrun Scrublet to obtain each cell's score\novercluster the manifold - run a basic Scanpy pipeline up to clustering, then additionally cluster each cluster separately\ncompute per-cluster Scrublet scores as the median of the observed values, and use those going forward\nidentify p-values:\ncompute normal distribution parameters: centered at the median of the scores, with a MAD-derived standard deviation\nthe score distribution is zero-truncated, so as per the paper I only use above-median values to compute the MAD\nK deviates from the paper a bit, at least the exact wording captured within it, and multiply the MAD by 1.4826 to obtain a literature-derived normal distribution standard deviation estimate\nFDR-correct the p-values via Benjamini-Hochberg\nwrite out all this doublet info into CSVs for later use\n\nNOTE: The second step is performed later, in a multi-sample space", "_____no_output_____" ] ], [ [ "path_to_data = '/nfs/users/nfs_l/lg18/team292/lg18/gonads/data/scRNAseq/FCA/rawdata/'", "_____no_output_____" ], [ "metadata = pd.read_csv(path_to_data + 'immune_meta.csv', index_col=0)\nmetadata['process'].value_counts()", "_____no_output_____" ], [ "# Select process = CD45+ \nmetadata_enriched = metadata[metadata['process'] == 'CD45+']\nmetadata_enriched", "_____no_output_____" ], [ "metadata_enriched['stage'] = metadata_enriched['stage'].astype('str')\nplotmeta = list(metadata_enriched.columns)\nplotmeta.append('sample')\nprint('Number of samples: ', metadata_enriched.index.size)", "Number of samples: 11\n" ], [ "#there's loads of clustering going on, so set verbosity low unless you enjoy walls of text\nsc.settings.verbosity = 0 # verbosity: errors (0), warnings (1), info (2), hints (3)\n\nscorenames = ['scrublet_score','scrublet_cluster_score','zscore','bh_pval','bonf_pval']\nif not os.path.exists('scrublet-scores'):\n os.makedirs('scrublet-scores')\n #loop over the subfolders of the rawdata folder\n\nsamples = metadata_enriched.index.to_list()\n\nfor sample in list(reversed(samples)):\n print(sample)\n #import data\n adata_sample = sc.read_10x_mtx(path_to_data + sample + '/filtered_feature_bc_matrix/',cache=True)\n adata_sample.var_names_make_unique()\n #rename cells to SAMPLE_BARCODE\n adata_sample.obs_names = [sample+'_'+i for i in adata_sample.obs_names]\n #do some early filtering to retain meaningful cells for doublet inspection\n sc.pp.filter_cells(adata_sample, min_genes=200)\n sc.pp.filter_genes(adata_sample, min_cells=3)\n #convert to lower to be species agnostic: human mito start with MT-, mouse with mt-\n mito_genes = [name for name in adata_sample.var_names if name.lower().startswith('mt-')]\n # for each cell compute fraction of counts in mito genes vs. all genes\n # the `.A1` is only necessary as X is sparse (to transform to a dense array after summing)\n adata_sample.obs['percent_mito'] = np.sum(\n adata_sample[:, mito_genes].X, axis=1).A1 / np.sum(adata_sample.X, axis=1).A1\n adata_sample = adata_sample[adata_sample.obs['percent_mito'] < 0.2, :]\n\n #set up and run Scrublet, seeding for replicability\n np.random.seed(0)\n scrub = scr.Scrublet(adata_sample.X)\n doublet_scores, predicted_doublets = scrub.scrub_doublets(verbose=False)\n adata_sample.obs['scrublet_score'] = doublet_scores\n\n #overcluster prep. 
run turbo basic scanpy pipeline\n sc.pp.normalize_per_cell(adata_sample, counts_per_cell_after=1e4)\n sc.pp.log1p(adata_sample)\n sc.pp.highly_variable_genes(adata_sample, min_mean=0.0125, max_mean=3, min_disp=0.5)\n adata_sample = adata_sample[:, adata_sample.var['highly_variable']]\n sc.pp.scale(adata_sample, max_value=10)\n sc.tl.pca(adata_sample, svd_solver='arpack')\n sc.pp.neighbors(adata_sample)\n #overclustering proper - do basic clustering first, then cluster each cluster\n sc.tl.leiden(adata_sample)\n adata_sample.obs['leiden'] = [str(i) for i in adata_sample.obs['leiden']]\n for clus in np.unique(adata_sample.obs['leiden']):\n adata_sub = adata_sample[adata_sample.obs['leiden']==clus].copy()\n sc.tl.leiden(adata_sub)\n adata_sub.obs['leiden'] = [clus+','+i for i in adata_sub.obs['leiden']]\n adata_sample.obs.loc[adata_sub.obs_names,'leiden'] = adata_sub.obs['leiden']\n\n #compute the cluster scores - the median of Scrublet scores per overclustered cluster\n for clus in np.unique(adata_sample.obs['leiden']):\n adata_sample.obs.loc[adata_sample.obs['leiden']==clus, 'scrublet_cluster_score'] = \\\n np.median(adata_sample.obs.loc[adata_sample.obs['leiden']==clus, 'scrublet_score'])\n #now compute doublet p-values. figure out the median and mad (from above-median values) for the distribution\n med = np.median(adata_sample.obs['scrublet_cluster_score'])\n mask = adata_sample.obs['scrublet_cluster_score']>med\n mad = np.median(adata_sample.obs['scrublet_cluster_score'][mask]-med)\n #let's do a one-sided test. the Bertie write-up does not address this but it makes sense\n zscores = (adata_sample.obs['scrublet_cluster_score'].values - med) / (1.4826 * mad)\n adata_sample.obs['zscore'] = zscores\n pvals = 1-scipy.stats.norm.cdf(zscores)\n adata_sample.obs['bh_pval'] = bh(pvals)\n adata_sample.obs['bonf_pval'] = bonf(pvals)\n\n #create results data frame for single sample and copy stuff over from the adata object\n scrublet_sample = pd.DataFrame(0, index=adata_sample.obs_names, columns=scorenames)\n for score in scorenames:\n scrublet_sample[score] = adata_sample.obs[score]\n #write out complete sample scores\n scrublet_sample.to_csv('scrublet-scores/'+sample+'.csv')", "FCA_GND8784459\n" ] ], [ [ "#### End of notebook", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d058283c3b3f5391480560d88136ec8ec51b1cea
50,580
ipynb
Jupyter Notebook
1.2-The Basics.ipynb
unmrds/cc-python
65a02aefc6428e5ed6e8ba8be1a6c179331a1e57
[ "Apache-2.0" ]
2
2020-03-27T15:08:03.000Z
2020-06-03T20:17:51.000Z
1.2-The Basics.ipynb
unmrds/cc-python
65a02aefc6428e5ed6e8ba8be1a6c179331a1e57
[ "Apache-2.0" ]
null
null
null
1.2-The Basics.ipynb
unmrds/cc-python
65a02aefc6428e5ed6e8ba8be1a6c179331a1e57
[ "Apache-2.0" ]
2
2017-09-13T02:23:19.000Z
2021-11-12T17:31:16.000Z
29.321739
1,686
0.514215
[ [ [ "## The Basics\n\nAt the core of Python (and any programming language) there are some key characteristics of how a program is structured that enable the proper execution of that program. These characteristics include the structure of the code itself, the core data types from which others are built, and core operators that modify objects or create new ones. From these raw materials more complex commands, functions, and modules are built.\nFor guidance on recommended Python structure refer to the [Python Style Guide](https://www.python.org/dev/peps/pep-0008).\n\n# Examples: Variables and Data Types\n\n## The Interpreter", "_____no_output_____" ] ], [ [ "# The interpreter can be used as a calculator, and can also echo or concatenate strings.\n\n3 + 3", "_____no_output_____" ], [ "3 * 3", "_____no_output_____" ], [ "3 ** 3", "_____no_output_____" ], [ "3 / 2 # classic division - output is a floating point number", "_____no_output_____" ], [ "# Use quotes around strings, single or double, but be consistent to the extent possible\n\n'dogs'", "_____no_output_____" ], [ "\"dogs\"", "_____no_output_____" ], [ "\"They're going to the beach\"", "_____no_output_____" ], [ "'He said \"I like mac and cheese\"'", "_____no_output_____" ], [ "# sometimes you can't escape the escape\n'He said \"I\\'d like mac and cheese\"'", "_____no_output_____" ], [ "# + operator can be used to concatenate strings\n\n'dogs' + \"cats\"", "_____no_output_____" ], [ "print('Hello World!')", "Hello World!\n" ] ], [ [ "### Try It Yourself\n\nGo to the section _4.4. Numeric Types_ in the Python 3 documentation at <https://docs.python.org/3.4/library/stdtypes.html>. The table in that section describes different operators - try some!\n\nWhat is the difference between the different division operators (`/`, `//`, and `%`)?\n\n## Variables\n\nVariables allow us to store values for later use. ", "_____no_output_____" ] ], [ [ "a = 5\nb = 10\na + b", "_____no_output_____" ] ], [ [ "Variables can be reassigned:", "_____no_output_____" ] ], [ [ "b = 38764289.1097\na + b", "_____no_output_____" ] ], [ [ "The ability to reassign variable values becomes important when iterating through groups of objects for batch processing or other purposes. In the example below, the value of `b` is dynamically updated every time the `while` loop is executed:", "_____no_output_____" ] ], [ [ "a = 5\nb = 10\nwhile b > a:\n print(\"b=\"+str(b))\n b = b-1", "b=10\nb=9\nb=8\nb=7\nb=6\n" ] ], [ [ "Variable data types can be inferred, so Python does not require us to declare the data type of a variable on assignment.", "_____no_output_____" ] ], [ [ "a = 5\ntype(a)", "_____no_output_____" ] ], [ [ "is equivalent to", "_____no_output_____" ] ], [ [ "a = int(5)\ntype(a)", "_____no_output_____" ], [ "c = 'dogs'\nprint(type(c))\n\nc = str('dogs')\nprint(type(c))", "<class 'str'>\n<class 'str'>\n" ] ], [ [ "There are cases when we may want to declare the data type, for example to assign a different data type from the default that will be inferred. Concatenating strings provides a good example. ", "_____no_output_____" ] ], [ [ "customer = 'Carol'\npizzas = 2\nprint(customer + ' ordered ' + pizzas + ' pizzas.')", "_____no_output_____" ] ], [ [ "Above, Python has inferred the type of the variable `pizza` to be an integer. Since strings can only be concatenated with other strings, our print statement generates an error. There are two ways we can resolve the error:\n\n1. Declare the `pizzas` variable as type string (`str`) on assignment or\n2. 
Re-cast the `pizzas` variable as a string within the `print` statement.", "_____no_output_____" ] ], [ [ "customer = 'Carol'\npizzas = str(2)\nprint(customer + ' ordered ' + pizzas + ' pizzas.')", "Carol ordered 2 pizzas.\n" ], [ "customer = 'Carol'\npizzas = 2\nprint(customer + ' ordered ' + str(pizzas) + ' pizzas.')", "Carol ordered 2 pizzas.\n" ] ], [ [ "Given the following variable assignments:\n\n```\nx = 12\ny = str(14)\nz = donuts\n```\n\nPredict the output of the following:\n\n1. `y + z`\n2. `x + y`\n3. `x + int(y)`\n4. `str(x) + y`\n\nCheck your answers in the interpreter.\n\n### Variable Naming Rules\n\nVariable names are case senstive and:\n\n1. Can only consist of one \"word\" (no spaces).\n2. Must begin with a letter or underscore character ('\\_').\n3. Can only use letters, numbers, and the underscore character.\n\nWe further recommend using variable names that are meaningful within the context of the script and the research.\n\n\n## Reading Files\n\nWe can accomplish a lot by assigning variables within our code as demonstrated above, but often we are interested in working with objects and data that exist in other files and directories on our system.\n\nWhen we want to read data files into a script, we do so by assigning the content of the file to a variable. This stores the data in memory and lets us perform processes and analyses on the data without changing the content of the source file.\n\nThere are several ways to read files in Python - many libraries have methods for reading text, Excel and Word documents, PDFs, etc. This morning we're going to demonstrate using the ```read()``` and ```readlines()``` method in the standard library, and the Pandas```read_csv()``` function.", "_____no_output_____" ] ], [ [ "# Read unstructured text\n\n# One way is to open the whole file as a block\nfile_path = \"./beowulf\" # We can save the path to the file as a variable\nfile_in = open(file_path, \"r\") # Options are 'r', 'w', and 'a' (read, write, append)\nbeowulf_a = file_in.read()\nfile_in.close()\nprint(beowulf_a)", "BEOWULF.\n\nI.\n\nTHE LIFE AND DEATH OF SCYLD.\n\n\n{The famous race of Spear-Danes.}\n\n Lo! the Spear-Danes' glory through splendid achievements\n The folk-kings' former fame we have heard of,\n How princes displayed then their prowess-in-battle.\n\n{Scyld, their mighty king, in honor of whom they are often called\nScyldings. He is the great-grandfather of Hrothgar, so prominent in the\npoem.}\n\n Oft Scyld the Scefing from scathers in numbers\n 5 From many a people their mead-benches tore.\n Since first he found him friendless and wretched,\n The earl had had terror: comfort he got for it,\n Waxed 'neath the welkin, world-honor gained,\n Till all his neighbors o'er sea were compelled to\n 10 Bow to his bidding and bring him their tribute:\n An excellent atheling! After was borne him\n\n{A son is born to him, who receives the name of Beowulf--a name afterwards\nmade so famous by the hero of the poem.}\n\n A son and heir, young in his dwelling,\n Whom God-Father sent to solace the people.\n He had marked the misery malice had caused them,\n 15 [1]That reaved of their rulers they wretched had erstwhile[2]\n Long been afflicted. 
The Lord, in requital,\n Wielder of Glory, with world-honor blessed him.\n Famed was Beowulf, far spread the glory\n Of Scyld's great son in the lands of the Danemen.\n" ], [ "# Another way is to read the file as a list of individual lines\n\nwith open(file_path, \"r\") as b:\n beowulf_b = b.readlines()\n\nprint(beowulf_b)", "['\\ufeffBEOWULF.\\n', '\\n', 'I.\\n', '\\n', 'THE LIFE AND DEATH OF SCYLD.\\n', '\\n', '\\n', '{The famous race of Spear-Danes.}\\n', '\\n', \" Lo! the Spear-Danes' glory through splendid achievements\\n\", \" The folk-kings' former fame we have heard of,\\n\", ' How princes displayed then their prowess-in-battle.\\n', '\\n', '{Scyld, their mighty king, in honor of whom they are often called\\n', 'Scyldings. He is the great-grandfather of Hrothgar, so prominent in the\\n', 'poem.}\\n', '\\n', ' Oft Scyld the Scefing from scathers in numbers\\n', ' 5 From many a people their mead-benches tore.\\n', ' Since first he found him friendless and wretched,\\n', ' The earl had had terror: comfort he got for it,\\n', \" Waxed 'neath the welkin, world-honor gained,\\n\", \" Till all his neighbors o'er sea were compelled to\\n\", ' 10 Bow to his bidding and bring him their tribute:\\n', ' An excellent atheling! After was borne him\\n', '\\n', '{A son is born to him, who receives the name of Beowulf--a name afterwards\\n', 'made so famous by the hero of the poem.}\\n', '\\n', ' A son and heir, young in his dwelling,\\n', ' Whom God-Father sent to solace the people.\\n', ' He had marked the misery malice had caused them,\\n', ' 15 [1]That reaved of their rulers they wretched had erstwhile[2]\\n', ' Long been afflicted. The Lord, in requital,\\n', ' Wielder of Glory, with world-honor blessed him.\\n', ' Famed was Beowulf, far spread the glory\\n', \" Of Scyld's great son in the lands of the Danemen.\"]\n" ], [ "# In order to get a similar printout to the first method, we use a for loop\n# to print line by line - more on for loops below!\n\nfor l in beowulf_b:\n print(l)", "BEOWULF.\n\n\n\nI.\n\n\n\nTHE LIFE AND DEATH OF SCYLD.\n\n\n\n\n\n{The famous race of Spear-Danes.}\n\n\n\n Lo! the Spear-Danes' glory through splendid achievements\n\n The folk-kings' former fame we have heard of,\n\n How princes displayed then their prowess-in-battle.\n\n\n\n{Scyld, their mighty king, in honor of whom they are often called\n\nScyldings. He is the great-grandfather of Hrothgar, so prominent in the\n\npoem.}\n\n\n\n Oft Scyld the Scefing from scathers in numbers\n\n 5 From many a people their mead-benches tore.\n\n Since first he found him friendless and wretched,\n\n The earl had had terror: comfort he got for it,\n\n Waxed 'neath the welkin, world-honor gained,\n\n Till all his neighbors o'er sea were compelled to\n\n 10 Bow to his bidding and bring him their tribute:\n\n An excellent atheling! After was borne him\n\n\n\n{A son is born to him, who receives the name of Beowulf--a name afterwards\n\nmade so famous by the hero of the poem.}\n\n\n\n A son and heir, young in his dwelling,\n\n Whom God-Father sent to solace the people.\n\n He had marked the misery malice had caused them,\n\n 15 [1]That reaved of their rulers they wretched had erstwhile[2]\n\n Long been afflicted. 
The Lord, in requital,\n\n Wielder of Glory, with world-honor blessed him.\n\n Famed was Beowulf, far spread the glory\n\n Of Scyld's great son in the lands of the Danemen.\n" ], [ "# We now have two variables with the content of our 'beowulf' file represented using two different data structures.\n# Why do you think we get the different outputs from the next two statements?\n\n# Beowulf text stored as one large string\nprint(\"As string:\", beowulf_a[0])\n\n# Beowulf text stored as a list of lines\nprint(\"As list of lines:\", beowulf_b[0])", "As string: \nAs list of lines: BEOWULF.\n\n" ], [ "# We can confirm our expectations by checking on the types of our two beowulf variables\nprint(type(beowulf_a))\nprint(type(beowulf_b))", "<class 'str'>\n<class 'list'>\n" ], [ "# Read CSV files using the Pandas read_csv method.\n# Note: Pandas also includes methods for reading Excel.\n\n# First we need to import the pandas library\nimport pandas as pd\n\n# Create a variable to hold the path to the file\nfpath = \"aaj1945_DataS1_Egg_shape_by_species_v2.csv\"\negg_data = pd.read_csv(fpath)", "_____no_output_____" ], [ "# We can get all kinds of info about the dataset\n\n# info() provides an overview of the structure\nprint(egg_data.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1400 entries, 0 to 1399\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Order 1400 non-null object \n 1 Family 1400 non-null object \n 2 MVZDatabase 1400 non-null object \n 3 Species 1396 non-null object \n 4 Asymmetry 1400 non-null float64\n 5 Ellipticity 1400 non-null float64\n 6 AvgLength (cm) 1400 non-null float64\n 7 Number of images 1400 non-null int64 \n 8 Number of eggs 1400 non-null int64 \ndtypes: float64(3), int64(2), object(4)\nmemory usage: 98.6+ KB\nNone\n" ], [ "# Look at the first five rows\negg_data.head()", "_____no_output_____" ], [ "# Names of columns\nprint(egg_data.columns.values)", "['Order' 'Family' 'MVZDatabase' 'Species' 'Asymmetry' 'Ellipticity'\n 'AvgLength (cm)' 'Number of images' 'Number of eggs']\n" ], [ "# Dimensions (number of rows and columns)\nprint(egg_data.shape)", "(1400, 9)\n" ], [ "# And much more! But as a final example we can perform operations on the data.\n# Descriptive statistics on the \"Number of eggs\" column\nprint(egg_data[\"Number of eggs\"].describe())", "count 1400.000000\nmean 35.125000\nstd 85.790347\nmin 1.000000\n25% 3.000000\n50% 8.000000\n75% 26.250000\nmax 1139.000000\nName: Number of eggs, dtype: float64\n" ], [ "# Or all of the columns in whole table with numeric data types:\nprint(egg_data.describe())", " Asymmetry Ellipticity AvgLength (cm) Number of images \\\ncount 1400.000000 1400.000000 1400.000000 1400.000000 \nmean 0.148230 0.384384 3.426853 9.320714 \nstd 0.071228 0.089594 2.161549 20.747693 \nmin 0.001400 0.096700 1.196000 1.000000 \n25% 0.104800 0.325775 1.958925 1.000000 \n50% 0.141750 0.377400 2.581150 2.000000 \n75% 0.184825 0.435075 4.323650 8.000000 \nmax 0.484700 0.723700 23.870000 300.000000 \n\n Number of eggs \ncount 1400.000000 \nmean 35.125000 \nstd 85.790347 \nmin 1.000000 \n25% 3.000000 \n50% 8.000000 \n75% 26.250000 \nmax 1139.000000 \n" ] ], [ [ "### Structure\n\nNow that we have practiced assigning variables and reading information from files, we will have a look at concepts that are key to developing processes to use and analyze this information.\n\n#### Blocks\n\nThe structure of a Python program is pretty simple:\nBlocks of code are defined using indentation. 
Code that is at a lower level of indentation is not considerd part of a block. Indentation can be defined using spaces or tabs (spaces are recommended by the style guide), but be consistent (and prepared to defend your choice). As we will see, code blocks define the boundaries of sets of commands that fit within a given section of code. This indentation model for defining blocks of code significantly increases the readabiltiy of Python code.\n\nFor example:\n\n >>>a = 5\n >>>b = 10\n >>>while b > a:\n ... print(\"b=\"+str(b))\n ... b = b-1\n >>>print(\"I'm outside the block\")", "_____no_output_____" ], [ "#### Comments & Documentation\n\nYou can (and should) also include documentation and comments in the code your write - both for yourself, and potential future users (including yourself). Comments are pretty much any content on a line that follows a `#` symbol (unless it is between quotation marks. For example:\n\n >>># we're going to do some math now\n >>>yae = 5 # the number of votes in favor\n >>>nay = 10 # the number of votes against\n >>>proportion = yae / nay # the proportion of votes in favor\n >>>print(proportion)\n", "_____no_output_____" ], [ "When you are creating functions or classes (a bit more on what these are in a bit) you can also create what are called *doc strings* that provide a defined location for content that is used to generate the `help()` information highlighted above and is also used by other systems for the automatic generation of documentation for packages that contain these *doc strings*. Creating a *doc string* is simple - just create a single or multi-line text string (more on this soon) that starts on the first indented line following the start of the definition of the function or class. For example: \n\n >>># we're going to create a documented function and then access the information about the function\n >>>def doc_demo(some_text=\"Ill skewer yer gizzard, ye salty sea bass\"):\n ... \"\"\"This function takes the provided text and prints it out in Pirate\n ... \n ... If a string is not provided for `some_text` a default message will be displayed\n ... \"\"\"\n ... out_string = \"Ahoy Matey. \" + some_text\n ... print(out_string)\n >>>help(doc_demo)\n >>>doc_demo()\n >>>doc_demo(\"Sail ho!\")", "_____no_output_____" ], [ "### Standard Objects\n\nAny programming language has at its foundation a collection of *types* or in Python's terminology *objects*. The standard objects of Python consist of the following:\n\n* **Numbers** - integer, floating point, complex, and multiple-base defined numeric values\n* **Strings** - **immutable** strings of characters, numbers, and symbols that are bounded by single- or double-quotes\n* **Lists** - an ordered collection of objects that is bounded by square-brackets - `[]`. Elements in lists are extracted or referenced by their position in the list. For example, `my_list[0]` refers to the first item in the list, `my_list[5]` the sixth, and `my_list[-1]` to the last item in the list. \n* **Dictionaries** - an unordered collection of objects that are referenced by *keys* that allow for referring to those objexts by reference to those keys. Dictionaries are bounded by curley-brackets - `{}` with each element of the dictionary consisting of a *key* (string) and a *value* (object) separated by a colon `:`. Elements of a dictionary are extracted or referenced using their keys. 
for example:\n\n my_dict = {\"key1\":\"value1\", \"key2\":36, \"key3\":[1,2,3]}\n my_dict['key1'] returns \"value1\"\n my_dict['key3'] returns [1,2,3]\n\n* **Tuples** - **immutable** lists that are bounded by parentheses = `()`. Referencing elements in a tuple is the same as referencing elements in a list above. \n* **Files** - objects that represent external files on the file system. Programs can interact with (e.g. read, write, append) external files through their representative file objects in the program.\n* **Sets** - unordered, collections of **immutable** objects (i.e. ints, floats, strings, and tuples) where membership in the set and uniqueness within the set are defining characteristics of the member objects. Sets are created using the `set` function on a sequence of objects. A specialized list of operators on sets allow for identifying *union*, *intersection*, and *difference* (among others) between sets. \n* **Other core types** - Booleans, types, `None`\n* **Program unit types** - *functions*, *modules*, and *classes* for example\n* **Implementation-related types** (not covered in this workshop)\n\nThese objects have their own sets of related methods (as we saw in the `help()` examples above) that enable their creation, and operations upon them.", "_____no_output_____" ] ], [ [ "# Fun with types\n\nthis = 12\nthat = 15\nthe_other = \"27\"\nmy_stuff = [this,that,the_other,[\"a\",\"b\",\"c\",4]]\nmore_stuff = {\n \"item1\": this, \n \"item2\": that, \n \"item3\": the_other, \n \"item4\": my_stuff\n}\nthis + that\n\n# this won't work ...\n# this + that + the_other\n\n# ... but this will ...\nthis + that + int(the_other)\n\n# ...and this too\nstr(this) + str(that) + the_other", "_____no_output_____" ] ], [ [ "## Lists\n\n<https://docs.python.org/3/library/stdtypes.html?highlight=lists#list>\n\nLists are a type of collection in Python. Lists allow us to store sequences of items that are typically but not always similar. All of the following lists are legal in Python:", "_____no_output_____" ] ], [ [ "# Separate list items with commas!\n\nnumber_list = [1, 2, 3, 4, 5]\nstring_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']\ncombined_list = [1, 2, 'oranges', 3.14, 'peaches', 'grapes', 99.19876]\n\n# Nested lists - lists of lists - are allowed.\n\nlist_of_lists = [[1, 2, 3], \n ['oranges', 'grapes', 8], \n [['small list'], \n ['bigger', 'list', 55], \n ['url_1', 'url_2']\n ]\n ]", "_____no_output_____" ] ], [ [ "There are multiple ways to create a list:", "_____no_output_____" ] ], [ [ "# Create an empty list\n\nempty_list = []\n\n# As we did above, by using square brackets around a comma-separated sequence of items\n\nnew_list = [1, 2, 3]\n\n# Using the type constructor\n\nconstructed_list = list('purple')\n\n# Using a list comprehension\n\nresult_list = [i for i in range(1, 20)]", "_____no_output_____" ] ], [ [ "We can inspect our lists:", "_____no_output_____" ] ], [ [ "empty_list", "_____no_output_____" ], [ "new_list", "_____no_output_____" ], [ "result_list", "_____no_output_____" ], [ "constructed_list", "_____no_output_____" ] ], [ [ "The above output for `constructed_list` may seem odd. 
Referring to the documentation, we see that the argument to the type constructor is an _iterable_, which according to the documentation is \"An object capable of returning its members one at a time.\" In our construtor statement above\n\n```\n# Using the type constructor\n\nconstructed_list = list('purple')\n```\n\nthe word 'purple' is the object - in this case a ```str``` (string) consisting of the word 'purple' - that when used to construct a list returns its members (individual letters) one at a time.\n\nCompare the outputs below:", "_____no_output_____" ] ], [ [ "constructed_list_int = list(123)", "_____no_output_____" ], [ "constructed_list_str = list('123')\nconstructed_list_str", "_____no_output_____" ] ], [ [ "Lists in Python are:\n\n* mutable - the list and list items can be changed\n* ordered - list items keep the same \"place\" in the list\n\n_Ordered_ here does not mean sorted. The list below is printed with the numbers in the order we added them to the list, not in numeric order:", "_____no_output_____" ] ], [ [ "ordered = [3, 2, 7, 1, 19, 0]\nordered", "_____no_output_____" ], [ "# There is a 'sort' method for sorting list items as needed:\n\nordered.sort()\nordered", "_____no_output_____" ] ], [ [ "Info on additional list methods is available at <https://docs.python.org/3/library/stdtypes.html?highlight=lists#mutable-sequence-types>\n\nBecause lists are ordered, it is possible to access list items by referencing their positions. Note that the position of the first item in a list is 0 (zero), not 1!", "_____no_output_____" ] ], [ [ "string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']", "_____no_output_____" ], [ "string_list[0]", "_____no_output_____" ], [ "# We can use positions to 'slice' or select sections of a list:\n\nstring_list[3:] # start at index '3' and continue to the end", "_____no_output_____" ], [ "string_list[:3] # start at index '0' and go up to, but don't include index '3'", "_____no_output_____" ], [ "string_list[1:4] # start at index '1' and go up to and don't include index '4'", "_____no_output_____" ], [ "# If we don't know the position of a list item, we can use the 'index()' method to find out.\n# Note that in the case of duplicate list items, this only returns the position of the first one:\n\nstring_list.index('pears')", "_____no_output_____" ], [ "string_list.append('oranges')", "_____no_output_____" ], [ "string_list", "_____no_output_____" ], [ "string_list.index('oranges')", "_____no_output_____" ], [ "# one more time with lists and dictionaries\nlist_ex1 = my_stuff[0] + my_stuff[1] + int(my_stuff[2])\nprint(list_ex1)\n\n# we can use parentheses to split a continuous group of commands over multiple lines\nlist_ex2 = (\n str(my_stuff[0]) \n + str(my_stuff[1]) \n + my_stuff[2] \n + my_stuff[3][0]\n)\nprint(list_ex2)\n\ndict_ex1 = (\n more_stuff['item1']\n + more_stuff['item2']\n + int(more_stuff['item3'])\n)\nprint(dict_ex1)\n\ndict_ex2 = (\n str(more_stuff['item1'])\n + str(more_stuff['item2'])\n + more_stuff['item3']\n)\nprint(dict_ex2)\n\n", "54\n121527a\n54\n121527\n" ], [ "# Now try it yourself ...\n# print out the phrase \"The answer: 42\" using the following \n# variables and one or more of your own and the 'print()' function\n# (remember spaces are characters as well)\n\nstart = \"The\"\nanswer = 42\n", "_____no_output_____" ] ], [ [ "### Operators\n\nIf *objects* are the nouns, operators are the verbs of a programming language. 
We've already seen examples of some operators: *assignment* with the `=` operator, *arithmetic* addition *and* string concatenation with the `+` operator, *arithmetic* division with the `/` and `-` operators, and *comparison* with the `>` operator. Different object types have different operators that may be used with them. The [Python Documentation](https://docs.python.org/3/library/stdtypes.html) provides detailed information about the operators and their functions as they relate to the standard object types described above. \n\n### Flow Control and Logical Tests\n\nFlow control commands allow for the dynamic execution of parts of the program based upon logical conditions, or processing of objects within an *iterable* object (like a list or dictionary). Some key flow control commands in python include:\n\n* `while-else` loops that continue to run until the termination test is `False` or a `break` command is issued within the loop:\n\n done = False\n i = 0\n while not done:\n i = i+1\n if i > 5: done = True\n\n* `if-elif-else` statements defined alternative blocks of code that are executed if a test condition is met:\n\n do_something = \"what?\"\n if do_something == \"what?\":\n print(do_something)\n elif do_something == \"where?\":\n print(\"Where are we going?\")\n else:\n print(\"I guess nothing is going to happen\")\n \n* `for` loops allow for repeated execution of a block of code for each item in a python sequence such as a list or dictionary. For example:\n\n my_stuff = ['a', 'b', 'c']\n for item in my_stuff:\n print(item)\n \n a\n b\n c\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d058447f13223e5cb8fcc4e661638b1b77895532
280,688
ipynb
Jupyter Notebook
tutorial/source/bo.ipynb
FlorianWilhelm/pyro
eb4909702af275aed7d4516cc6df63691f401b88
[ "Apache-2.0" ]
1
2021-02-08T22:53:23.000Z
2021-02-08T22:53:23.000Z
tutorial/source/bo.ipynb
Jimmy-INL/pyro
fcd56f986264c7d4aac75839940e157fa80ea2f1
[ "Apache-2.0" ]
null
null
null
tutorial/source/bo.ipynb
Jimmy-INL/pyro
fcd56f986264c7d4aac75839940e157fa80ea2f1
[ "Apache-2.0" ]
null
null
null
681.281553
248,952
0.944821
[ [ [ "# Bayesian Optimization\n\n[Bayesian optimization](https://en.wikipedia.org/wiki/Bayesian_optimization) is a powerful strategy for minimizing (or maximizing) objective functions that are costly to evaluate. It is an important component of [automated machine learning](https://en.wikipedia.org/wiki/Automated_machine_learning) toolboxes such as [auto-sklearn](https://automl.github.io/auto-sklearn/stable/), [auto-weka](http://www.cs.ubc.ca/labs/beta/Projects/autoweka/), and [scikit-optimize](https://scikit-optimize.github.io/), where Bayesian optimization is used to select model hyperparameters. Bayesian optimization is used for a wide range of other applications as well; as cataloged in the review [2], these include interactive user-interfaces, robotics, environmental monitoring, information extraction, combinatorial optimization, sensor networks, adaptive Monte Carlo, experimental design, and reinforcement learning.\n\n## Problem Setup\n\nWe are given a minimization problem\n\n$$ x^* = \\text{arg}\\min \\ f(x), $$\n\nwhere $f$ is a fixed objective function that we can evaluate pointwise. \nHere we assume that we do _not_ have access to the gradient of $f$. We also\nallow for the possibility that evaluations of $f$ are noisy.\n\nTo solve the minimization problem, we will construct a sequence of points $\\{x_n\\}$ that converge to $x^*$. Since we implicitly assume that we have a fixed budget (say 100 evaluations), we do not expect to find the exact minumum $x^*$: the goal is to get the best approximate solution we can given the allocated budget.\n\nThe Bayesian optimization strategy works as follows:\n\n1. Place a prior on the objective function $f$. Each time we evaluate $f$ at a new point $x_n$, we update our model for $f(x)$. This model serves as a surrogate objective function and reflects our beliefs about $f$ (in particular it reflects our beliefs about where we expect $f(x)$ to be close to $f(x^*)$). Since we are being Bayesian, our beliefs are encoded in a posterior that allows us to systematically reason about the uncertainty of our model predictions.\n\n2. Use the posterior to derive an \"acquisition\" function $\\alpha(x)$ that is easy to evaluate and differentiate (so that optimizing $\\alpha(x)$ is easy). In contrast to $f(x)$, we will generally evaluate $\\alpha(x)$ at many points $x$, since doing so will be cheap.\n\n3. Repeat until convergence:\n\n + Use the acquisition function to derive the next query point according to\n $$ x_{n+1} = \\text{arg}\\min \\ \\alpha(x). $$\n\n + Evaluate $f(x_{n+1})$ and update the posterior.\n\nA good acquisition function should make use of the uncertainty encoded in the posterior to encourage a balance between exploration&mdash;querying points where we know little about $f$&mdash;and exploitation&mdash;querying points in regions we have good reason to think $x^*$ may lie. As the iterative procedure progresses our model for $f$ evolves and so does the acquisition function. If our model is good and we've chosen a reasonable acquisition function, we expect that the acquisition function will guide the query points $x_n$ towards $x^*$.\n\nIn this tutorial, our model for $f$ will be a Gaussian process. 
In particular we will see how to use the [Gaussian Process module](http://docs.pyro.ai/en/0.3.1/contrib.gp.html) in Pyro to implement a simple Bayesian optimization procedure.", "_____no_output_____" ] ], [ [ "import matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.autograd as autograd\nimport torch.optim as optim\nfrom torch.distributions import constraints, transform_to\n\nimport pyro\nimport pyro.contrib.gp as gp\n\nassert pyro.__version__.startswith('1.5.2')\npyro.set_rng_seed(1)", "_____no_output_____" ] ], [ [ "## Define an objective function\n\nFor the purposes of demonstration, the objective function we are going to consider is the [Forrester et al. (2008) function](https://www.sfu.ca/~ssurjano/forretal08.html):\n\n$$f(x) = (6x-2)^2 \\sin(12x-4), \\quad x\\in [0, 1].$$\n\nThis function has both a local minimum and a global minimum. The global minimum is at $x^* = 0.75725$.", "_____no_output_____" ] ], [ [ "def f(x):\n return (6 * x - 2)**2 * torch.sin(12 * x - 4)", "_____no_output_____" ] ], [ [ "Let's begin by plotting $f$.", "_____no_output_____" ] ], [ [ "x = torch.linspace(0, 1)\nplt.figure(figsize=(8, 4))\nplt.plot(x.numpy(), f(x).numpy())\nplt.show()", "_____no_output_____" ] ], [ [ "## Setting a Gaussian Process prior", "_____no_output_____" ], [ "[Gaussian processes](https://en.wikipedia.org/wiki/Gaussian_process) are a popular choice for a function priors due to their power and flexibility. The core of a Gaussian Process is its covariance function $k$, which governs the similarity of $f(x)$ for pairs of input points. Here we will use a Gaussian Process as our prior for the objective function $f$. Given inputs $X$ and the corresponding noisy observations $y$, the model takes the form\n\n$$f\\sim\\mathrm{MultivariateNormal}(0,k(X,X)),$$\n\n$$y\\sim f+\\epsilon,$$\n\nwhere $\\epsilon$ is i.i.d. Gaussian noise and $k(X,X)$ is a covariance matrix whose entries are given by $k(x,x^\\prime)$ for each pair of inputs $(x,x^\\prime)$.\n\nWe choose the [Matern](https://en.wikipedia.org/wiki/Mat%C3%A9rn_covariance_function) kernel with $\\nu = \\frac{5}{2}$ (as suggested in reference [1]). 
Note that the popular [RBF](https://en.wikipedia.org/wiki/Radial_basis_function_kernel) kernel, which is used in many regression tasks, results in a function prior whose samples are infinitely differentiable; this is probably an unrealistic assumption for most 'black-box' objective functions.", "_____no_output_____" ] ], [ [ "# initialize the model with four input points: 0.0, 0.33, 0.66, 1.0\nX = torch.tensor([0.0, 0.33, 0.66, 1.0])\ny = f(X)\ngpmodel = gp.models.GPRegression(X, y, gp.kernels.Matern52(input_dim=1),\n noise=torch.tensor(0.1), jitter=1.0e-4)", "_____no_output_____" ] ], [ [ "The following helper function `update_posterior` will take care of updating our `gpmodel` each time we evaluate $f$ at a new value $x$.", "_____no_output_____" ] ], [ [ "def update_posterior(x_new):\n y = f(x_new) # evaluate f at new point.\n X = torch.cat([gpmodel.X, x_new]) # incorporate new evaluation \n y = torch.cat([gpmodel.y, y]) \n gpmodel.set_data(X, y)\n # optimize the GP hyperparameters using Adam with lr=0.001\n optimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)\n gp.util.train(gpmodel, optimizer)", "_____no_output_____" ] ], [ [ "## Define an acquisition function", "_____no_output_____" ], [ "There are many reasonable options for the acquisition function (see references [1] and [2] for a list of popular choices and a discussion of their properties). Here we will use one that is 'simple to implement and interpret,' namely the 'Lower Confidence Bound' acquisition function. \nIt is given by\n\n$$\n\\alpha(x) = \\mu(x) - \\kappa \\sigma(x)\n$$\n\nwhere $\\mu(x)$ and $\\sigma(x)$ are the mean and square root variance of the posterior at the point $x$, and the arbitrary constant $\\kappa>0$ controls the trade-off between exploitation and exploration. This acquisition function will be minimized for choices of $x$ where either: i) $\\mu(x)$ is small (exploitation); or ii) where $\\sigma(x)$ is large (exploration). A large value of $\\kappa$ means that we place more weight on exploration because we prefer candidates $x$ in areas of high uncertainty. A small value of $\\kappa$ encourages exploitation because we prefer candidates $x$ that minimize $\\mu(x)$, which is the mean of our surrogate objective function. We will use $\\kappa=2$.", "_____no_output_____" ] ], [ [ "def lower_confidence_bound(x, kappa=2):\n mu, variance = gpmodel(x, full_cov=False, noiseless=False)\n sigma = variance.sqrt()\n return mu - kappa * sigma", "_____no_output_____" ] ], [ [ "The final component we need is a way to find (approximate) minimizing points $x_{\\rm min}$ of the acquisition function. There are several ways to proceed, including gradient-based and non-gradient-based techniques. Here we will follow the gradient-based approach. One of the possible drawbacks of gradient descent methods is that the minimization algorithm can get stuck at a local minimum. In this tutorial, we adopt a (very) simple approach to address this issue:\n\n- First, we seed our minimization algorithm with 5 different values: i) one is chosen to be $x_{n-1}$, i.e. the candidate $x$ used in the previous step; and ii) four are chosen uniformly at random from the domain of the objective function. \n- We then run the minimization algorithm to approximate convergence for each seed value. 
\n- Finally, from the five candidate $x$s identified by the minimization algorithm, we select the one that minimizes the acquisition function.\n\nPlease refer to reference [2] for a more detailed discussion of this problem in Bayesian Optimization.", "_____no_output_____" ] ], [ [ "def find_a_candidate(x_init, lower_bound=0, upper_bound=1):\n # transform x to an unconstrained domain\n constraint = constraints.interval(lower_bound, upper_bound)\n unconstrained_x_init = transform_to(constraint).inv(x_init)\n unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(True)\n minimizer = optim.LBFGS([unconstrained_x], line_search_fn='strong_wolfe')\n\n def closure():\n minimizer.zero_grad()\n x = transform_to(constraint)(unconstrained_x)\n y = lower_confidence_bound(x)\n autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))\n return y\n \n minimizer.step(closure)\n # after finding a candidate in the unconstrained domain, \n # convert it back to original domain.\n x = transform_to(constraint)(unconstrained_x)\n return x.detach()", "_____no_output_____" ] ], [ [ "## The inner loop of Bayesian Optimization\n\nWith the various helper functions defined above, we can now encapsulate the main logic of a single step of Bayesian Optimization in the function `next_x`:", "_____no_output_____" ] ], [ [ "def next_x(lower_bound=0, upper_bound=1, num_candidates=5):\n candidates = []\n values = []\n\n x_init = gpmodel.X[-1:]\n for i in range(num_candidates):\n x = find_a_candidate(x_init, lower_bound, upper_bound)\n y = lower_confidence_bound(x)\n candidates.append(x)\n values.append(y)\n x_init = x.new_empty(1).uniform_(lower_bound, upper_bound)\n\n argmin = torch.min(torch.cat(values), dim=0)[1].item()\n return candidates[argmin]", "_____no_output_____" ] ], [ [ "## Running the algorithm", "_____no_output_____" ], [ "To illustrate how Bayesian Optimization works, we make a convenient plotting function that will help us visualize our algorithm's progress.", "_____no_output_____" ] ], [ [ "def plot(gs, xmin, xlabel=None, with_title=True):\n xlabel = \"xmin\" if xlabel is None else \"x{}\".format(xlabel)\n Xnew = torch.linspace(-0.1, 1.1)\n ax1 = plt.subplot(gs[0])\n ax1.plot(gpmodel.X.numpy(), gpmodel.y.numpy(), \"kx\") # plot all observed data\n with torch.no_grad():\n loc, var = gpmodel(Xnew, full_cov=False, noiseless=False)\n sd = var.sqrt()\n ax1.plot(Xnew.numpy(), loc.numpy(), \"r\", lw=2) # plot predictive mean\n ax1.fill_between(Xnew.numpy(), loc.numpy() - 2*sd.numpy(), loc.numpy() + 2*sd.numpy(),\n color=\"C0\", alpha=0.3) # plot uncertainty intervals\n ax1.set_xlim(-0.1, 1.1)\n ax1.set_title(\"Find {}\".format(xlabel))\n if with_title:\n ax1.set_ylabel(\"Gaussian Process Regression\")\n\n ax2 = plt.subplot(gs[1])\n with torch.no_grad():\n # plot the acquisition function\n ax2.plot(Xnew.numpy(), lower_confidence_bound(Xnew).numpy())\n # plot the new candidate point\n ax2.plot(xmin.numpy(), lower_confidence_bound(xmin).numpy(), \"^\", markersize=10,\n label=\"{} = {:.5f}\".format(xlabel, xmin.item())) \n ax2.set_xlim(-0.1, 1.1)\n if with_title:\n ax2.set_ylabel(\"Acquisition Function\")\n ax2.legend(loc=1)", "_____no_output_____" ] ], [ [ "Our surrogate model `gpmodel` already has 4 function evaluations at its disposal; however, we have yet to optimize the GP hyperparameters. So we do that first. Then in a loop we call the `next_x` and `update_posterior` functions repeatedly. 
The following plot illustrates how Gaussian Process posteriors and the corresponding acquisition functions change at each step in the algorith. Note how query points are chosen both for exploration and exploitation.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12, 30))\nouter_gs = gridspec.GridSpec(5, 2)\noptimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)\ngp.util.train(gpmodel, optimizer)\nfor i in range(8):\n xmin = next_x() \n gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer_gs[i])\n plot(gs, xmin, xlabel=i+1, with_title=(i % 2 == 0))\n update_posterior(xmin)\nplt.show()", "_____no_output_____" ] ], [ [ "Because we have assumed that our observations contain noise, it is improbable that we will find the exact minimizer of the function $f$. Still, with a relatively small budget of evaluations (8) we see that the algorithm has converged to very close to the global minimum at $x^* = 0.75725$. \n\nWhile this tutorial is only intended to be a brief introduction to Bayesian Optimization, we hope that we have been able to convey the basic underlying ideas. Consider watching the lecture by Nando de Freitas [3] for an excellent exposition of the basic theory. Finally, the reference paper [2] gives a review of recent research on Bayesian Optimization, together with many discussions about important technical details.", "_____no_output_____" ], [ "## References\n\n[1] `Practical bayesian optimization of machine learning algorithms`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nJasper Snoek, Hugo Larochelle, and Ryan P. Adams\n\n[2] `Taking the human out of the loop: A review of bayesian optimization`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\nBobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P. Adams, and Nando De Freitas\n\n[3] [Machine learning - Bayesian optimization and multi-armed bandits](https://www.youtube.com/watch?v=vz3D36VXefI)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d058542a87c903208052f5f657b82347762f3fd5
1,990
ipynb
Jupyter Notebook
notebooks/pandas-all-crimes-csv-data-preview.ipynb
RandomFractals/ChicagoCrimes
379723cc8463d6122931fa37209ece0422432175
[ "Apache-2.0" ]
14
2017-07-12T17:54:37.000Z
2021-06-19T04:20:58.000Z
notebooks/pandas-all-crimes-csv-data-preview.ipynb
RandomFractals/ChicagoCrimes
379723cc8463d6122931fa37209ece0422432175
[ "Apache-2.0" ]
1
2020-10-16T01:20:03.000Z
2020-10-16T01:20:03.000Z
notebooks/pandas-all-crimes-csv-data-preview.ipynb
RandomFractals/ChicagoCrimes
379723cc8463d6122931fa37209ece0422432175
[ "Apache-2.0" ]
6
2018-08-16T21:50:37.000Z
2022-03-29T22:46:30.000Z
22.873563
80
0.559296
[ [ [ "import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('seaborn')", "_____no_output_____" ], [ "%%time\n\n# Note: this is strictly for comparison of how slow it is \n# to load all Chicago crimes data with pandas\n# see notebooks/all-chicago-crime-charts.ipynb notebook \n# for loading same data with dask in a compressed snappy parquet format\n\n# set csv data file path\ncsv_data_file = '../raw_data/Crimes_-_2001_to_present.csv'\nprint('Loading crime data from: {}'.format(csv_data_file))\nprint('...')\n\n# load crimes csv data into pandas dataframe\ncrimes = pd.read_csv(csv_data_file, error_bad_lines=False)\n\nprint('Crime data loaded into memory.')", "Loading crime data from: ../raw_data/Crimes_-_2001_to_present.csv\nCrime data loaded into memory.\nWall time: 2min 13s\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d058554ca53ee84d880fa02765dc37e9adddad8b
965,001
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/old_meerkat_UMAP_basic-checkpoint.ipynb
marathomas/meerkat_umap
1c1c23eba6e6219d777464f5afdb6c778198e09e
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/old_meerkat_UMAP_basic-checkpoint.ipynb
marathomas/meerkat_umap
1c1c23eba6e6219d777464f5afdb6c778198e09e
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/old_meerkat_UMAP_basic-checkpoint.ipynb
marathomas/meerkat_umap
1c1c23eba6e6219d777464f5afdb6c778198e09e
[ "MIT" ]
null
null
null
775.101205
372,128
0.95183
[ [ [ "# UMAP \n\nThis script generates UMAP representations from spectrograms (previously generated).", "_____no_output_____" ], [ "### Installing and loading libraries", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd\nimport sys\nimport numpy as np\nfrom pandas.core.common import flatten\nimport pickle\nimport umap\nfrom pathlib import Path\nimport datetime\nimport scipy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib\nimport librosa.display\nfrom scipy.spatial.distance import pdist, squareform", "_____no_output_____" ], [ "from plot_functions import umap_2Dplot, mara_3Dplot, plotly_viz\nfrom preprocessing_functions import pad_spectro, calc_zscore, preprocess_spec_numba, create_padded_data", "_____no_output_____" ] ], [ [ "### Setting constants", "_____no_output_____" ], [ "Setting project, input and output folders.", "_____no_output_____" ] ], [ [ "wd = os.getcwd()\n\nDATA = os.path.join(os.path.sep, str(Path(wd).parents[0]), \"data\", \"processed\")\nFIGURES = os.path.join(os.path.sep, str(Path(wd).parents[0]), \"reports\", \"figures\")\n\nDF_DICT = {}\nfor dftype in ['full', 'reduced', 'balanced']:\n DF_DICT[dftype] = os.path.join(os.path.sep, DATA, \"df_focal_\"+dftype+\".pkl\")", "_____no_output_____" ], [ "LOAD_EXISTING = True # if true, load existing embedding instead of creating new\nOVERWRITE_FIGURES = False # if true, overwrite existing figures", "_____no_output_____" ] ], [ [ "# UMAP projection", "_____no_output_____" ], [ "### Choose dataset", "_____no_output_____" ] ], [ [ "#dftype='full'\ndftype='reduced'\n#dftype='balanced'\n\nspec_df = pd.read_pickle(DF_DICT[dftype])\nlabels = spec_df.call_lable.values\nspec_df.shape", "_____no_output_____" ] ], [ [ "### Choose feature", "_____no_output_____" ] ], [ [ "specs = spec_df.spectrograms.copy()\nspecs = [calc_zscore(x) for x in specs] \ndata = create_padded_data(specs)", "_____no_output_____" ] ], [ [ "## Run UMAP", "_____no_output_____" ] ], [ [ "# 3D\n\nembedding_filename = os.path.join(os.path.sep, DATA,'basic_UMAP_3D_'+dftype+'_default_params.csv')\nprint(embedding_filename)\nif (LOAD_EXISTING and os.path.isfile(embedding_filename)):\n embedding = np.loadtxt(embedding_filename, delimiter=\";\")\n print(\"File already exists\")\nelse:\n reducer = umap.UMAP(n_components=3, min_dist = 0, random_state=2204)\n embedding = reducer.fit_transform(data)\n np.savetxt(embedding_filename, embedding, delimiter=\";\")", "/home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/data/processed/basic_UMAP_3D_reduced_default_params.csv\nFile already exists\n" ], [ "# 2D\n\nembedding_filename = os.path.join(os.path.sep, DATA,'basic_UMAP_2D_'+dftype+'_default_params.csv')\nprint(embedding_filename)\nif (LOAD_EXISTING and os.path.isfile(embedding_filename)):\n embedding2D = np.loadtxt(embedding_filename, delimiter=\";\")\n print(\"File already exists\")\nelse:\n reducer = umap.UMAP(n_components=2, min_dist = 0, random_state=2204)\n embedding2D = reducer.fit_transform(data)\n np.savetxt(embedding_filename, embedding2D, delimiter=\";\")", "/home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/data/processed/basic_UMAP_2D_reduced_default_params.csv\nFile already exists\n" ] ], [ [ "## Visualization", "_____no_output_____" ] ], [ [ "pal=\"Set2\"", "_____no_output_____" ] ], [ [ "### 2D Plots", "_____no_output_____" ] ], [ [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'UMAP_2D_plot_'+dftype+'_nolegend.jpg')\nelse:\n outname=None\n 
\nprint(outname)\numap_2Dplot(embedding2D[:,0], embedding2D[:,1], labels, pal, outname=outname, showlegend=False)", "None\n" ] ], [ [ "### 3D Plot", "_____no_output_____" ], [ "#### Matplotlib ", "_____no_output_____" ] ], [ [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'UMAP_3D_plot_'+dftype+'_nolegend.jpg')\nelse:\n outname=None\n \n \nprint(outname)\nmara_3Dplot(embedding[:,0],\n embedding[:,1],\n embedding[:,2],\n labels,\n pal,\n outname,\n showlegend=False)", "None\n" ] ], [ [ "#### Plotly\n\nInteractive viz in plotly (though without sound or spectrogram)", "_____no_output_____" ] ], [ [ "#plotly_viz(embedding[:,0],\n# embedding[:,1],\n# embedding[:,2],\n# labels,\n# pal)", "_____no_output_____" ] ], [ [ "# Embedding evaluation", "_____no_output_____" ], [ "Evaluate the embedding based on calltype labels of nearest neighbors.", "_____no_output_____" ] ], [ [ "from evaluation_functions import nn, sil", "_____no_output_____" ], [ "# produce nearest neighbor statistics\nnn_stats = nn(embedding, np.asarray(labels), k=5)", "_____no_output_____" ] ], [ [ "## Calculate metrics", "_____no_output_____" ] ], [ [ "print(\"Log final metric (unweighted):\",nn_stats.get_S())\nprint(\"Abs final metric (unweighted):\",nn_stats.get_Snorm())\n\nprint(nn_stats.knn_accuracy())", "Log final metric (unweighted): 61.80374532835941\nAbs final metric (unweighted): 2.353601084423661\n[46.93333333 71.20743034 82.53215978 11.58536585 76.15283267 94.77088949\n 58.66551127]\n" ], [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'heatS_UMAP_'+dftype+'.png')\nelse:\n outname=None\n\nprint(outname)\nnn_stats.plot_heat_S(outname=outname)", "None\n" ], [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'heatSnorm_UMAP_'+dftype+'.png')\nelse:\n outname=None\n\nprint(outname)\nnn_stats.plot_heat_Snorm(outname=outname)", "/home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/reports/figures/heatSnorm_UMAP_reduced.png\n" ], [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'heatfold_UMAP_'+dftype+'.png')\nelse:\n outname=None\n\nprint(outname)\nnn_stats.plot_heat_fold(outname=outname)", "/home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/reports/figures/heatfold_UMAP_reduced.png\n" ] ], [ [ "# Within vs. 
outside distances", "_____no_output_____" ] ], [ [ "from evaluation_functions import plot_within_without", "_____no_output_____" ], [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES,\"distanceswithinwithout_\"+dftype+\".png\")\nelse:\n outname=None\n\nprint(outname)\nplot_within_without(embedding=embedding, labels=labels, outname=outname)", "_____no_output_____" ] ], [ [ "## Silhouette Plot", "_____no_output_____" ] ], [ [ "sil_stats = sil(embedding, labels)", "_____no_output_____" ], [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep, FIGURES, 'silplot_UMAP_'+dftype+'.png')\nelse:\n outname=None\n\nprint(outname)\nsil_stats.plot_sil(outname=outname)", "_____no_output_____" ], [ "sil_stats.get_avrg_score()", "_____no_output_____" ] ], [ [ "## How many dimensions?", "_____no_output_____" ], [ "Evaluate, how many dimensions are best for the embedding.", "_____no_output_____" ] ], [ [ "specs = spec_df.spectrograms.copy()\n# normalize feature\nspecs = [calc_zscore(x) for x in specs] \n# pad feature\nmaxlen= np.max([spec.shape[1] for spec in specs])\nflattened_specs = [pad_spectro(spec, maxlen).flatten() for spec in specs]\ndata = np.asarray(flattened_specs)\ndata.shape\n\nembeddings = {}\nfor n_dims in range(1,11):\n reducer = umap.UMAP(n_components = n_dims, min_dist = 0, metric='euclidean', random_state=2204)\n embeddings[n_dims] = reducer.fit_transform(data) ", "_____no_output_____" ], [ "labels = spec_df.call_lable.values\ncalltypes = sorted(list(set(labels)))\nk=5\ndims_tab = np.zeros((10,1))\n\nfor n_dims in range(1,11):\n nn_stats = nn(embeddings[n_dims], labels, k=k)\n stats_tab = nn_stats.get_statstab()\n mean_metric = np.mean(np.diagonal(stats_tab.iloc[:-1,]))\n print(mean_metric)\n dims_tab[n_dims-1,:] = mean_metric", "50.58424610415988\n58.563857957885126\n61.80374532835941\n62.77403440295045\n63.00331779923705\n62.982056278621826\n63.05904675978132\n62.815992829918954\n62.84994702911214\n63.15648543919968\n" ], [ "x = np.arange(1,11,1)\ny = dims_tab[:,0]\nplt.plot(x,y, marker='o', markersize=4)\nplt.xlabel(\"N_components\")\nplt.ylabel(\"Embedding score S\")\nplt.xticks(np.arange(0, 11, step=1))\nplt.savefig(os.path.join(os.path.sep,FIGURES,'n_dims.png'), facecolor=\"white\")", "_____no_output_____" ] ], [ [ "Note that this is different than doing UMAP with n=10 components and then selection only the first x dimensions in UMAP space!", "_____no_output_____" ], [ "# Graph from embedding evaluation", "_____no_output_____" ] ], [ [ "if OVERWRITE_FIGURES:\n outname = os.path.join(os.path.sep,FIGURES,'simgraph_test.png')\nelse:\n outname=None\n\nnn_stats.draw_simgraph(outname)", "Graph saved at /home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/reports/figures/simgraph_test.png\n" ] ], [ [ "Resource: https://en.it1352.com/article/d096c1eadbb84c19b038eb9648153346.html", "_____no_output_____" ], [ "# Visualize example nearest neighbors", "_____no_output_____" ] ], [ [ "import random\nimport scipy\nfrom sklearn.neighbors import NearestNeighbors", "_____no_output_____" ], [ "knn=5\n# Find k nearest neighbors\nnbrs = NearestNeighbors(metric='euclidean',n_neighbors=knn+1, algorithm='brute').fit(embedding)\ndistances, indices = nbrs.kneighbors(embedding)\n\n# need to remove the first neighbor, because that is the datapoint itself\nindices = indices[:,1:] \ndistances = distances[:,1:]", "_____no_output_____" ], [ "calltypes = sorted(list(set(spec_df['call_lable'])))\nlabels = spec_df.call_lable.values\nnames = spec_df.Name.values", 
"_____no_output_____" ], [ "# make plots per calltype\n\nn_examples = 3\n\nfor calltype in calltypes:\n\n fig = plt.figure(figsize=(14,6))\n fig_name = 'NN_viz_'+calltype\n k=1\n call_indices = np.asarray(np.where(labels==calltype))[0]\n\n # randomly choose 3\n random.seed(2204)\n example_indices = random.sample(list(call_indices), n_examples)\n\n for i,ind in enumerate(example_indices):\n img_of_interest = spec_df.iloc[ind,:].spectrograms\n embedding_of_interest = embedding[ind,:]\n plt.subplot(n_examples, knn+1, k)\n #librosa.display.specshow(np.transpose(spec))\n plt.imshow(img_of_interest, interpolation='nearest', origin='lower', aspect='equal') \n #plt.title(calltype+' : 0')\n #plt.title(calltype)\n k=k+1\n\n nearest_neighbors = indices[ind]\n for neighbor in nearest_neighbors:\n neighbor_label = names[neighbor]\n neighbor_embedding = embedding[neighbor,:]\n dist_to_original = scipy.spatial.distance.euclidean(embedding_of_interest, neighbor_embedding)\n neighbor_img = spec_df.iloc[neighbor,:].spectrograms\n plt.subplot(n_examples, knn+1, k)\n plt.imshow(neighbor_img, interpolation='nearest', origin='lower', aspect='equal') \n k=k+1\n \n plt.tight_layout() \n plt.savefig(os.path.join(os.path.sep,FIGURES,fig_name), facecolor=\"white\")\n plt.close()", "_____no_output_____" ], [ "# Randomly choose 10 calls and plot their 4 nearest neighbors\n\nn_examples = 10\n\n\nfig = plt.figure(figsize=(14,25))\nfig_name = 'NN_viz'\nk=1\n\n# randomly choose 3\nrandom.seed(2204)\nexample_indices = random.sample(list(range(embedding.shape[0])), n_examples)\n\nfor i,ind in enumerate(example_indices):\n img_of_interest = spec_df.iloc[ind,:].spectrograms\n embedding_of_interest = embedding[ind,:]\n plt.subplot(n_examples, knn+1, k)\n plt.imshow(img_of_interest, interpolation='nearest', origin='lower', aspect='equal') \n k=k+1\n\n nearest_neighbors = indices[ind]\n for neighbor in nearest_neighbors:\n neighbor_label = names[neighbor]\n neighbor_embedding = embedding[neighbor,:]\n dist_to_original = scipy.spatial.distance.euclidean(embedding_of_interest, neighbor_embedding)\n neighbor_img = spec_df.iloc[neighbor,:].spectrograms\n plt.subplot(n_examples, knn+1, k)\n plt.imshow(neighbor_img, interpolation='nearest', origin='lower', aspect='equal') \n k=k+1\n \nplt.tight_layout() \nplt.savefig(os.path.join(os.path.sep,FIGURES,fig_name), facecolor=\"white\")", "_____no_output_____" ] ], [ [ "# Visualize preprocessing steps", "_____no_output_____" ] ], [ [ "N_MELS = 40\nMEL_BINS_REMOVED_UPPER = 5\nMEL_BINS_REMOVED_LOWER = 5\n\n# make plots\ncalltypes = sorted(list(set(spec_df.call_lable.values)))\n\nfig = plt.figure(figsize=(10,6))\nfig_name = 'preprocessing_examples_mara.png'\n\nfig.suptitle('Preprocessing steps', fontsize=16)\nk=1\n\n# randomly choose 4\nexamples = spec_df.sample(n=6, random_state=1)\nexamples.reset_index(inplace=True)\nori_specs = examples.denoised_spectrograms\n\n\n# original\n\nspecs = ori_specs\nvmin = np.min([np.min(x) for x in specs])\nvmax = np.max([np.max(x) for x in specs])\n\nfor i in range(examples.shape[0]):\n spec = specs[i]\n plt.subplot(5, 6, k)\n #librosa.display.specshow(spec, y_axis='mel', fmin=0, fmax=4000)\n plt.imshow(spec, interpolation='nearest', origin='lower', aspect='equal', norm=None,vmin=vmin, vmax=vmax)\n if i==0: plt.ylabel('none', rotation=0, labelpad=30)\n plt.title(\"Example \"+str(i+1))\n k=k+1\n\n# z-score\nspecs = ori_specs.copy()\n#specs = [x[MEL_BINS_REMOVED_LOWER:(N_MELS-MEL_BINS_REMOVED_UPPER),:] for x in specs]\nspecs = [calc_zscore(s) for s in 
specs]\n#vmin = np.min([np.min(x) for x in specs])\n#vmax = np.max([np.max(x) for x in specs])\nfor i in range(examples.shape[0]):\n spec = specs[i]\n plt.subplot(5, 6, k)\n plt.imshow(spec, interpolation='nearest', origin='lower', aspect='equal')\n if i==0: plt.ylabel('zs', rotation=0, labelpad=30)\n k=k+1\n\n# cut\nfor i in range(examples.shape[0]):\n spec = ori_specs[i]\n spec = spec[MEL_BINS_REMOVED_LOWER:(N_MELS-MEL_BINS_REMOVED_UPPER),:]\n spec = calc_zscore(spec)\n plt.subplot(5, 6, k)\n plt.imshow(spec, interpolation='nearest', origin='lower', aspect='equal') \n if i==0: plt.ylabel('zs-cu', rotation=0, labelpad=30)\n k=k+1\n\n# floor\nfor i in range(examples.shape[0]):\n spec = ori_specs[i]\n spec = spec[MEL_BINS_REMOVED_LOWER:(N_MELS-MEL_BINS_REMOVED_UPPER),:]\n spec = calc_zscore(spec)\n spec = np.where(spec < 0, 0, spec)\n plt.subplot(5, 6, k)\n plt.imshow(spec, interpolation='nearest', origin='lower', aspect='equal') \n if i==0: plt.ylabel('zs-cu-fl', rotation=0, labelpad=30)\n k=k+1\n\n# ceiling\nfor i in range(examples.shape[0]):\n spec = ori_specs[i]\n\n spec = spec[MEL_BINS_REMOVED_LOWER:(N_MELS-MEL_BINS_REMOVED_UPPER),:]\n spec = calc_zscore(spec)\n spec = np.where(spec < 0, 0, spec)\n spec = np.where(spec > 3, 3, spec)\n plt.subplot(5, 6, k)\n plt.imshow(spec, interpolation='nearest', origin='lower', aspect='equal') \n if i==0: plt.ylabel('zs-cu-fl-ce', rotation=0, labelpad=30)\n k=k+1\n\nplt.tight_layout()\noutname= os.path.join(os.path.sep,FIGURES,fig_name)\nprint(outname)\nplt.savefig(outname)", "/home/mthomas/Documents/MPI_work/projects/meerkat/meerkat_umap_pv/reports/figures/preprocessing_examples_mara.png\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d05857dd5efd4abe81e3a984a316062ed1bd1d2d
125,596
ipynb
Jupyter Notebook
exploratory_analysis/eda_scene.ipynb
and-le/cse-151b-argoverse
6f65bab980121d14bf811061072306c9ac8a2fb9
[ "MIT" ]
null
null
null
exploratory_analysis/eda_scene.ipynb
and-le/cse-151b-argoverse
6f65bab980121d14bf811061072306c9ac8a2fb9
[ "MIT" ]
null
null
null
exploratory_analysis/eda_scene.ipynb
and-le/cse-151b-argoverse
6f65bab980121d14bf811061072306c9ac8a2fb9
[ "MIT" ]
null
null
null
382.914634
116,032
0.926789
[ [ [ "There are 76,670 different agent ids in the training data.", "_____no_output_____" ] ], [ [ "import os\nimport pickle\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "%matplotlib inline\nsns.set(rc={\"figure.dpi\":100, 'savefig.dpi':100})\nsns.set_context('notebook')", "_____no_output_____" ], [ "# Keys to the pickle objects\nCITY = 'city'\nLANE = 'lane'\nLANE_NORM = 'lane_norm'\nSCENE_IDX = 'scene_idx'\nAGENT_ID = 'agent_id'\nP_IN = 'p_in'\nV_IN = 'v_in'\nP_OUT = 'p_out'\nV_OUT = 'v_out'\nCAR_MASK = 'car_mask'\nTRACK_ID = 'track_id'\n\n# Set the training and test paths\nTEST_PATH = '../new_val_in/'\nTRAIN_PATH = '../new_train/'\ntrain_path = TRAIN_PATH\ntest_path = TEST_PATH\n\n# DUMMY_TRAIN_PATH = './dummy_train/'\n# DUMMY_TEST_PATH = './dummy_val/'\n# train_path = DUMMY_TRAIN_PATH\n# test_path = DUMMY_TEST_PATH", "_____no_output_____" ] ], [ [ "# Size of training and test data", "_____no_output_____" ] ], [ [ "train_size = len([entry for entry in os.scandir(train_path)])\ntest_size = len([entry for entry in os.scandir(test_path)])\n\nprint(f\"Number of training samples = {train_size}\")\nprint(f\"Number of test samples = {test_size}\")", "Number of training samples = 205942\nNumber of test samples = 3200\n" ] ], [ [ "# Scene object", "_____no_output_____" ] ], [ [ "# Open directory containing pickle files\nwith os.scandir(train_path) as entries:\n scene = None\n\n # Get the first pickle file\n entry = next(entries)\n \n # Open the first pickle file and store its data\n with open(entry, \"rb\") as file:\n scene = pickle.load(file)\n\n# Look at key-value pairs\nprint('Scene object:')\nfor k, v in scene.items():\n if type(v) is np.ndarray:\n print(f\"{k} : shape = {v.shape}\")\n else:\n print(f\"{k} : {type(v)}\")", "Scene object:\ncity : <class 'str'>\nlane : shape = (72, 3)\nlane_norm : shape = (72, 3)\nscene_idx : <class 'int'>\nagent_id : <class 'str'>\ncar_mask : shape = (60, 1)\np_in : shape = (60, 19, 2)\nv_in : shape = (60, 19, 2)\np_out : shape = (60, 30, 2)\nv_out : shape = (60, 30, 2)\ntrack_id : shape = (60, 30, 1)\n" ] ], [ [ "# Scene Analysis", "_____no_output_____" ] ], [ [ "random.seed(1)", "_____no_output_____" ], [ "def lane_centerline(scene):\n lane = scene[LANE]\n lane_norm = scene[LANE_NORM]\n \n fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n ax1.quiver(lane[:, 0], lane[:, 1], lane_norm[:, 0], lane_norm[:, 1], color='gray')\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_title('Lane centerline')", "_____no_output_____" ], [ "def target_agent(scene):\n lane = scene[LANE]\n lane_norm = scene[LANE_NORM]\n \n pin = scene[P_IN]\n pout = scene[P_OUT]\n vin = scene[V_IN]\n vout = scene[V_OUT]\n # Get the index of the target agent\n targ = np.where(scene[TRACK_ID][:, 0, 0] == scene[AGENT_ID])[0][0]\n \n fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_title('Target agent motion') \n \n ax1.quiver(lane[:, 0], lane[:, 1], lane_norm[:, 0], lane_norm[:, 1], units='xy', color='black')\n ax1.quiver(pin[targ, :, 0], pin[targ, :, 1], vin[targ, :, 0], vin[targ, :, 1], color='red', units='xy');\n ax1.quiver(pout[targ, :, 0], pout[targ, :, 1], vout[targ, :, 0], vout[targ, :, 1], color='blue', units='xy');", "_____no_output_____" ], [ "def full_scene(scene):\n lane = scene[LANE]\n lane_norm = scene[LANE_NORM]\n \n pin = scene[P_IN]\n pout = scene[P_OUT]\n vin = scene[V_IN]\n vout = scene[V_OUT]\n # Get 
the index of the target agent\n targ = np.where(scene[TRACK_ID][:, 0, 0] == scene[AGENT_ID])[0][0]\n \n actual_idxs = np.where(scene[CAR_MASK][:, 0] == 1) # Row indexes of actually tracked agents\n pin_other = scene[P_IN][actual_idxs]\n vin_other = scene[V_IN][actual_idxs]\n pout_other = scene[P_OUT][actual_idxs]\n vout_other = scene[V_OUT][actual_idxs]\n \n fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_title('Scene ' + str(scene[SCENE_IDX]))\n ax1.quiver(lane[:, 0], lane[:, 1], lane_norm[:, 0], lane_norm[:, 1], units='xy', color='gray', label='Center line(s)')\n \n # Index of the last other agent - can either be the last element in the array or the element right before\n # target when target is the last element in the array\n last_other = len(actual_idxs[0]) - 1 if targ != len(actual_idxs[0]) - 1 else targ - 1\n \n for i in range(len(actual_idxs[0])):\n # Non target agent\n if i != targ:\n if i == last_other:\n ax1.quiver(pin[i, :, 0], pin[i, :, 1], vin[i, :, 0], vin[i, :, 1], \n color='orange', units='xy', label='Other agent input')\n ax1.quiver(pout[i, :, 0], pout[i, :, 1], vout[i, :, 0], vout[i, :, 1], \n color='blue', units='xy', label='Other agent output')\n else:\n ax1.quiver(pin[i, :, 0], pin[i, :, 1], vin[i, :, 0], vin[i, :, 1], \n color='orange', units='xy', label='_nolegend_')\n ax1.quiver(pout[i, :, 0], pout[i, :, 1], vout[i, :, 0], vout[i, :, 1], \n color='blue', units='xy', label='_nolegend_')\n set_other_legend = True\n \n else:\n ax1.quiver(pin[targ, :, 0], pin[targ, :, 1], vin[targ, :, 0], vin[targ, :, 1], \n color='lightgreen', units='xy', label='Target agent input')\n ax1.quiver(pout[targ, :, 0], pout[targ, :, 1], vout[targ, :, 0], vout[targ, :, 1], \n color='darkgreen', units='xy', label='Target agent output')\n \n \n ax1.legend()", "_____no_output_____" ], [ "# Randomly pick a scene\nscene = None\nrand = random.choice(os.listdir(train_path))\n# Build out full path name\nrand = train_path + rand\nwith open(rand, \"rb\") as file:\n scene = pickle.load(file)\n \nscene[SCENE_IDX]\n# lane_centerline(scene)\n# target_agent(scene)\nfull_scene(scene)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d05885a31951837c3f906fe7fdc43ceac2b3314a
13,492
ipynb
Jupyter Notebook
TensorFlow Advanced Techniques Specialization/Course-2/Custom and Distributed Training with TensorFlow/Week-1/C2W1_Assignment.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
TensorFlow Advanced Techniques Specialization/Course-2/Custom and Distributed Training with TensorFlow/Week-1/C2W1_Assignment.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
TensorFlow Advanced Techniques Specialization/Course-2/Custom and Distributed Training with TensorFlow/Week-1/C2W1_Assignment.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
24.801471
291
0.491254
[ [ [ "# Basic Tensor operations and GradientTape.\n\nIn this graded assignment, you will perform different tensor operations as well as use [GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape). These are important building blocks for the next parts of this course so it's important to master the basics. Let's begin!", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Exercise 1 - [tf.constant]((https://www.tensorflow.org/api_docs/python/tf/constant))\n\nCreates a constant tensor from a tensor-like object. ", "_____no_output_____" ] ], [ [ "# Convert NumPy array to Tensor using `tf.constant`\ndef tf_constant(array):\n \"\"\"\n Args:\n array (numpy.ndarray): tensor-like array.\n\n Returns:\n tensorflow.python.framework.ops.EagerTensor: tensor.\n \"\"\"\n ### START CODE HERE ###\n tf_constant_array = tf.constant(array)\n ### END CODE HERE ###\n return tf_constant_array", "_____no_output_____" ], [ "tmp_array = np.arange(1,10)\nx = tf_constant(tmp_array)\nx\n\n# Expected output:\n# <tf.Tensor: shape=(9,), dtype=int64, numpy=array([1, 2, 3, 4, 5, 6, 7, 8, 9])>", "_____no_output_____" ] ], [ [ "Note that for future docstrings, the type `EagerTensor` will be used as a shortened version of `tensorflow.python.framework.ops.EagerTensor`.", "_____no_output_____" ], [ "## Exercise 2 - [tf.square](https://www.tensorflow.org/api_docs/python/tf/math/square)\n\nComputes the square of a tensor element-wise.", "_____no_output_____" ] ], [ [ "# Square the input tensor\ndef tf_square(array):\n \"\"\"\n Args:\n array (numpy.ndarray): tensor-like array.\n\n Returns:\n EagerTensor: tensor.\n \"\"\"\n # make sure it's a tensor\n array = tf.constant(array)\n \n ### START CODE HERE ###\n tf_squared_array = tf.square(array)\n ### END CODE HERE ###\n return tf_squared_array", "_____no_output_____" ], [ "tmp_array = tf.constant(np.arange(1, 10))\nx = tf_square(tmp_array)\nx\n\n# Expected output:\n# <tf.Tensor: shape=(9,), dtype=int64, numpy=array([ 1, 4, 9, 16, 25, 36, 49, 64, 81])>", "_____no_output_____" ] ], [ [ "## Exercise 3 - [tf.reshape](https://www.tensorflow.org/api_docs/python/tf/reshape)\n\nReshapes a tensor.", "_____no_output_____" ] ], [ [ "# Reshape tensor into the given shape parameter\ndef tf_reshape(array, shape):\n \"\"\"\n Args:\n array (EagerTensor): tensor to reshape.\n shape (tuple): desired shape.\n\n Returns:\n EagerTensor: reshaped tensor.\n \"\"\"\n # make sure it's a tensor\n array = tf.constant(array)\n ### START CODE HERE ###\n tf_reshaped_array = tf.reshape(array, shape = shape)\n ### END CODE HERE ###\n return tf_reshaped_array", "_____no_output_____" ], [ "# Check your function\ntmp_array = np.array([1,2,3,4,5,6,7,8,9])\n# Check that your function reshapes a vector into a matrix\nx = tf_reshape(tmp_array, (3, 3))\nx\n\n# Expected output:\n# <tf.Tensor: shape=(3, 3), dtype=int64, numpy=\n# [[1, 2, 3],\n# [4, 5, 6],\n# [7, 8, 9]]", "_____no_output_____" ] ], [ [ "## Exercise 4 - [tf.cast](https://www.tensorflow.org/api_docs/python/tf/cast)\n\nCasts a tensor to a new type.", "_____no_output_____" ] ], [ [ "# Cast tensor into the given dtype parameter\ndef tf_cast(array, dtype):\n \"\"\"\n Args:\n array (EagerTensor): tensor to be casted.\n dtype (tensorflow.python.framework.dtypes.DType): desired new type. 
(Should be a TF dtype!)\n\n Returns:\n EagerTensor: casted tensor.\n \"\"\"\n # make sure it's a tensor\n array = tf.constant(array)\n\n ### START CODE HERE ###\n tf_cast_array = tf.cast(array, dtype = dtype)\n ### END CODE HERE ###\n return tf_cast_array", "_____no_output_____" ], [ "# Check your function\ntmp_array = [1,2,3,4]\nx = tf_cast(tmp_array, tf.float32)\nx\n\n# Expected output:\n# <tf.Tensor: shape=(4,), dtype=float32, numpy=array([1., 2., 3., 4.], dtype=float32)>", "_____no_output_____" ] ], [ [ "## Exercise 5 - [tf.multiply](https://www.tensorflow.org/api_docs/python/tf/multiply)\n\nReturns an element-wise x * y.", "_____no_output_____" ] ], [ [ "# Multiply tensor1 and tensor2\ndef tf_multiply(tensor1, tensor2):\n \"\"\"\n Args:\n tensor1 (EagerTensor): a tensor.\n tensor2 (EagerTensor): another tensor.\n\n Returns:\n EagerTensor: resulting tensor.\n \"\"\"\n # make sure these are tensors\n tensor1 = tf.constant(tensor1)\n tensor2 = tf.constant(tensor2)\n \n ### START CODE HERE ###\n product = tf.multiply(tensor1, tensor2)\n ### END CODE HERE ###\n return product\n", "_____no_output_____" ], [ "# Check your function\ntmp_1 = tf.constant(np.array([[1,2],[3,4]]))\ntmp_2 = tf.constant(np.array(2))\nresult = tf_multiply(tmp_1, tmp_2)\nresult\n\n# Expected output:\n# <tf.Tensor: shape=(2, 2), dtype=int64, numpy=\n# array([[2, 4],\n# [6, 8]])>", "_____no_output_____" ] ], [ [ "## Exercise 6 - [tf.add](https://www.tensorflow.org/api_docs/python/tf/add)\n\nReturns x + y element-wise.", "_____no_output_____" ] ], [ [ "# Add tensor1 and tensor2\ndef tf_add(tensor1, tensor2):\n \"\"\"\n Args:\n tensor1 (EagerTensor): a tensor.\n tensor2 (EagerTensor): another tensor.\n\n Returns:\n EagerTensor: resulting tensor.\n \"\"\"\n # make sure these are tensors\n tensor1 = tf.constant(tensor1)\n tensor2 = tf.constant(tensor2)\n \n ### START CODE HERE ###\n total = tf.add(tensor1, tensor2)\n ### END CODE HERE ###\n return total", "_____no_output_____" ], [ "# Check your function\ntmp_1 = tf.constant(np.array([1, 2, 3]))\ntmp_2 = tf.constant(np.array([4, 5, 6]))\ntf_add(tmp_1, tmp_2)\n\n# Expected output:\n# <tf.Tensor: shape=(3,), dtype=int64, numpy=array([5, 7, 9])>", "_____no_output_____" ] ], [ [ "## Exercise 7 - Gradient Tape\n\nImplement the function `tf_gradient_tape` by replacing the instances of `None` in the code below. The instructions are given in the code comments.\n\nYou can review the [docs](https://www.tensorflow.org/api_docs/python/tf/GradientTape) or revisit the lectures to complete this task.", "_____no_output_____" ] ], [ [ "def tf_gradient_tape(x):\n \"\"\"\n Args:\n x (EagerTensor): a tensor.\n\n Returns:\n EagerTensor: Derivative of z with respect to the input tensor x.\n \"\"\"\n with tf.GradientTape() as t:\n \n ### START CODE HERE ###\n # Record the actions performed on tensor x with `watch`\n t.watch(x) \n\n # Define a polynomial of form 3x^3 - 2x^2 + x\n y = (3 * (x ** 3)) - (2 * (x ** 2)) + x\n\n # Obtain the sum of the elements in variable y\n z = tf.reduce_sum(y)\n \n # Get the derivative of z with respect to the original input tensor x\n dz_dx = t.gradient(z, x)\n ### END CODE HERE\n \n return dz_dx", "_____no_output_____" ], [ "# Check your function\ntmp_x = tf.constant(2.0)\ndz_dx = tf_gradient_tape(tmp_x)\nresult = dz_dx.numpy()\nresult\n\n# Expected output:\n# 29.0", "_____no_output_____" ] ], [ [ "**Congratulations on finishing this week's assignment!**\n\n**Keep it up!**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0588fa70c427cc5ff78ea82a9995fe638414857
286,386
ipynb
Jupyter Notebook
Exploratory Data Analysis.ipynb
full-void/data-science-concepts
b95d1ee4be792e8037358eebbd873c4de1059266
[ "Apache-2.0" ]
1
2021-09-29T19:52:47.000Z
2021-09-29T19:52:47.000Z
Exploratory Data Analysis.ipynb
full-void/data-science-concepts
b95d1ee4be792e8037358eebbd873c4de1059266
[ "Apache-2.0" ]
null
null
null
Exploratory Data Analysis.ipynb
full-void/data-science-concepts
b95d1ee4be792e8037358eebbd873c4de1059266
[ "Apache-2.0" ]
null
null
null
77.213804
28,896
0.718541
[ [ [ "# Exploratory Data Analysis\n\nIn this notebook, I have illuminated some of the strategies that one can use to explore the data and gain some insights about it.\n\nWe will start from finding metadata about the data, to determining what techniques to use, to getting some important insights about the data. This is based on the IBM's Data Analysis with Python course on Coursera. \n\n## The Problem\nThe problem is to find the variables that impact the car price. For this problem, we will use a real-world dataset that details information about cars.\n\nThe dataset used is an open-source dataset made available by Jeffrey C. Schlimmer. The one used in this notebook is hosted on the IBM Cloud. The dataset provides details of some cars. It includes properties like make, horse-power, price, wheel-type and so on. ", "_____no_output_____" ], [ "## Loading data and finding the metadata\n", "_____no_output_____" ], [ " Import libraries \n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\n%matplotlib inline ", "_____no_output_____" ] ], [ [ "Load the data as pandas dataframe\n", "_____no_output_____" ] ], [ [ "path='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/automobileEDA.csv'\ndf = pd.read_csv(path)\ndf.head()", "_____no_output_____" ] ], [ [ "### Metadata: The columns's types\n\nFinding column's types is an important step. It serves two purposes:\n1. See if we need to convert some data. For example, price may be in string instead of numbers. This is very important as it could throw everything that we do afterwards off.\n2. Find out what type of analysis we need to do with what column. After fixing the problems given above, the type of the object is often a great indicator of whether the data is categorical or numerical. This is important as it would determine what kind of exploratory analysis we can and want to do. \n\nTo find out the type, we can simply use `.dtypes` property of the dataframe. Here's an example using the dataframe we loaded above.", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "From the results above, we can see that we can roughly divide the types into two categories: numeric (int64 and float64) and object. Although object type can contain lots of things, it's used often to store string variables. A quick glance at the table tells us that there's no glaring errors in object types. \n\nNow we divide them into two categories: numerical variables and categorical variables. Numerical, as the name states, are the variables that hold numerical data. Categorical variables hold string that describes a certain property of the data (such as Audi as the make).\n\nMake a special note that our target variable, price, is numerical. So the relationships we would be exploring would be between numerical-and-numerical data and numerical-and-categorical data. ", "_____no_output_____" ], [ "## Relationship between Numerical Data\n\nFirst we will explore the relationship between two numerical data and see if we can learn some insights out of it.\n\nIn the beginning, it's helpful to get the correlation between the variables. For this, we can use the `corr()` method to find out the correlation between all the variables. \n\nDo note that the method finds out the Pearson correlation. Natively, pandas also support Spearman and the Kendall Tau correlation. 
You can also pass in a custom callable if you want. Check out the docs for more info.\n\nHere's how to do it with the dataframe that we have:", "_____no_output_____" ] ], [ [ "df.corr()", "_____no_output_____" ] ], [ [ "Note that the diagonal elements are always one; because correlation with itself is always one. \n\nNow, it seems somewhat daunting, and frankly, unneccessary to have this big of a table and correlation between things we don't care (say bore and stroke). If we want to find out the correlation with just price, using `corrwith()` method is helpful. \n\nHere's how to do it:", "_____no_output_____" ] ], [ [ "corr = df.corrwith(df['price'])\n\n# Prettify\npd.DataFrame(data=corr.values, index=corr.index, columns=['Correlation'])", "_____no_output_____" ] ], [ [ "From the table above, we have some idea about what can we expect the relationship should be like. \n\nAs a refresher, in Pearson correlation, values range in [-1, 1] with -1 and 1 implying a perfect linear relationship and 0 implying none. A positive value implies a positive relationship (value increase in response to increment) and negative value implies negative relationship (value decrease in response to increment).\n\nThe next step is to have a more visual outlook on the relationship.", "_____no_output_____" ], [ "### Visualizing Relationships \n\nContinuous numerical variables are variables that may contain any value within some range. In pandas dtype, continuous numerical variables can have the type \"int64\" or \"float64\". \n\nScatterplots are a great way to visualize these variables is by using scatterplots.\n\nTo take it further, it's better to use a scatter plot with a regression line. This should also be able to provide us with some preliminary ways to test our hypothesis of the relationship between them. \n\nIn this notebook, we would be using the `regplot()` function in the `seaborn` package.\n\nBelow are some examples.", "_____no_output_____" ], [ "<h4>Positive linear relationship</h4>\n", "_____no_output_____" ], [ "Let's plot \"engine-size\" vs \"price\" since the correlation between them seems strong. \n", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(5,5))\nsns.regplot(x=\"engine-size\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "As the engine-size goes up, the price goes up. This indicates a decent positive direct correlation between these two variables. Thus, we can say that the engine size is a good predictor of price since the regression line is almost a perfect diagonal line.\n\nWe can also check this with the Pearson correlation we got above. It's 0.87, which means sense. ", "_____no_output_____" ], [ "Let's also try highway mpg too since the correlation between them is -0.7 ", "_____no_output_____" ] ], [ [ "sns.regplot(x=\"highway-mpg\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "The graph shows a decent negative realtionship. So, it could be a potential indicator. Although, it seems that the relationship isn't exactly normal--given the curve of the points. \n\nLet's try a higher order regression line.", "_____no_output_____" ] ], [ [ "sns.regplot(x=\"highway-mpg\", y=\"price\", data=df, order=2);", "_____no_output_____" ] ], [ [ "There. It seems much better.", "_____no_output_____" ], [ "### Weak Linear Relationship", "_____no_output_____" ], [ "Not all variables have to be correlated. 
Let's check out the graph of \"Peak-rpm\" as a predictor variable for \"price\".", "_____no_output_____" ] ], [ [ "sns.regplot(x=\"peak-rpm\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "From the graph, it's clear that peak rpm is a bad indicator of price. It seems that there is no relationship between them. It seems almost random. \n\nA quick check at the correlation value confirms this. The value is -0.1. It's very close to zero, implying no relationship. \n\nAlthough there are cases in which low value can be misguiding, it's usually only for relationships that show a non-linear relationship in which value goes down and up. But the graph confirms there is none.", "_____no_output_____" ], [ "## Relationship between Numerical and Categorical data\n\nCategorical variables, like their name imply, divide the data into certain categories. They essentially describe a 'characteristic' of the data unit, and are often selected from a small group of categories.\n\nAlthough they commonly have \"object\" type, it's possible to have them has \"int64\" too (for example 'Level of happiness'). ", "_____no_output_____" ], [ "### Visualizing with Boxplots\n\nBoxplots are a great way to visualize such relationships. Boxplots essentially show the spread of the data. You can use the `boxplot()` function in the seaborn package. Alternatively, you can use boxen or violin plots too.\n\nHere's an example by plotting relationship between \"body-style\" and \"price\"", "_____no_output_____" ] ], [ [ "sns.boxplot(x=\"body-style\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "We can infer that there is likely to be no significant relationship as there is a decent over lap. \n\nLet's examine engine \"engine-location\" and \"price\"", "_____no_output_____" ] ], [ [ "sns.boxplot(x=\"engine-location\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "Although there are a lot of outliers for the front, the distribution of price between these two engine-location categories is distinct enough to take engine-location as a potential good predictor of price.\n\nLet's examine \"drive-wheels\" and \"price\".", "_____no_output_____" ] ], [ [ "sns.boxplot(x=\"drive-wheels\", y=\"price\", data=df);", "_____no_output_____" ] ], [ [ "<p>Here we see that the distribution of price between the different drive-wheels categories differs; as such drive-wheels could potentially be a predictor of price.</p>\n", "_____no_output_____" ], [ "### Statistical method to checking for a significant realtionship - ANOVA\n\nAlthough visualisation is helpful, it does not give us a concrete and certain vision in this (and often in others) case. So, it follows that we would want a metric to evaluate it by. For correlation between categorical and continuous variable, there are various tests. ANOVA family of tests is a common one to use.\n\nThe Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups. \n\nDo note that ANOVA is an _omnibus_ test statistic and it can't tell you what groups are the ones that have correlation among them. Only that there are at least two groups with a significant difference. \n\nIn python, we can calculate the ANOVA statistic fairly easily using the `scipy.stats` module. The function `f_oneway()` calculates and returns: \n__F-test score__: ANOVA assumes the means of all groups are the same, calculates how much the actual means deviate from the assumption, and reports it as the F-test score. 
A larger score means there is a larger difference between the means. Although the degree of the 'largeneess' differs from data to data. You can use the F-table to find out the critical F-value by using the significance level and degrees of freedom for numerator and denominator and compare it with the calculated F-test score.\n\n__P-value__: P-value tells how statistically significant is our calculated score value.\n\nIf the variables are strongly correlated, the expectation is to have ANOVA to return a sizeable F-test score and a small p-value.", "_____no_output_____" ], [ "#### Drive Wheels\n\nSince ANOVA analyzes the difference between different groups of the same variable, the `groupby()` function will come in handy. With this, we can easily and concisely seperate the dataset into groups of drive-wheels. Essentially, the function allows us to split the dataset into groups and perform calculations on groups moving forward. Check out Grouping below for more explanation.\n\nLet's see if different types 'drive-wheels' impact 'price', we group the data.", "_____no_output_____" ] ], [ [ "grouped_anova = df[['drive-wheels', 'price']].groupby(['drive-wheels'])\ngrouped_anova.head(2)", "_____no_output_____" ] ], [ [ "We can obtain the values of the method group using the method `get_group()` ", "_____no_output_____" ] ], [ [ "grouped_anova.get_group('4wd')['price']", "_____no_output_____" ] ], [ [ "Finally, we use the function `f_oneway()` to obtain the F-test score and P-value.", "_____no_output_____" ] ], [ [ "# ANOVA\nf_val, p_val = stats.f_oneway(grouped_anova.get_group('fwd')['price'], grouped_anova.get_group('rwd')['price'], grouped_anova.get_group('4wd')['price']) \n \nprint( \"ANOVA results: F=\", f_val, \", P =\", p_val) ", "ANOVA results: F= 67.95406500780399 , P = 3.3945443577151245e-23\n" ] ], [ [ "From the result, we can see that we have a large F-test score and a very small p-value. Still, we need to check if all three tested groups are highly correlated?", "_____no_output_____" ], [ "#### Separately: fwd and rwd", "_____no_output_____" ] ], [ [ "f_val, p_val = stats.f_oneway(grouped_anova.get_group('fwd')['price'], grouped_anova.get_group('rwd')['price']) \n \nprint( \"ANOVA results: F=\", f_val, \", P =\", p_val )", "ANOVA results: F= 130.5533160959111 , P = 2.2355306355677845e-23\n" ] ], [ [ "Seems like the result is significant and they are correlated. Let's examine the other groups", "_____no_output_____" ], [ "#### 4wd and rwd", "_____no_output_____" ] ], [ [ "f_val, p_val = stats.f_oneway(grouped_anova.get_group('4wd')['price'], grouped_anova.get_group('rwd')['price']) \n \nprint( \"ANOVA results: F=\", f_val, \", P =\", p_val) ", "ANOVA results: F= 8.580681368924756 , P = 0.004411492211225333\n" ] ], [ [ "<h4>4wd and fwd</h4>\n", "_____no_output_____" ] ], [ [ "f_val, p_val = stats.f_oneway(grouped_anova.get_group('4wd')['price'], grouped_anova.get_group('fwd')['price']) \n \nprint(\"ANOVA results: F=\", f_val, \", P =\", p_val) ", "ANOVA results: F= 0.665465750252303 , P = 0.41620116697845666\n" ] ], [ [ "## Relationship between Categorical Data: Corrected Cramer's V\n\nA good way to test relation between two categorical variable is Corrected Cramer's V. \n\n**Note:** A p-value close to zero means that our variables are very unlikely to be completely unassociated in some population. 
However, this does not mean the variables are strongly associated; a weak association in a large sample size may also result in p = 0.000.\n\n**General Rule of Thumb:**\n* V ∈ [0.1,0.3]: weak association\n* V ∈ [0.4,0.5]: medium association\n* V > 0.5: strong association\n\nHere's how to do it in python:\n\n```python\nimport scipy.stats as ss\nimport pandas as pd\nimport numpy as np\ndef cramers_corrected_stat(x, y):\n \"\"\" calculate Cramers V statistic for categorial-categorial association.\n uses correction from Bergsma and Wicher, \n Journal of the Korean Statistical Society 42 (2013): 323-328\n \"\"\"\n result = -1\n if len(x.value_counts()) == 1:\n print(\"First variable is constant\")\n elif len(y.value_counts()) == 1:\n print(\"Second variable is constant\")\n else:\n conf_matrix = pd.crosstab(x, y)\n\n if conf_matrix.shape[0] == 2:\n correct = False\n else:\n correct = True\n\n chi2, p = ss.chi2_contingency(conf_matrix, correction=correct)[0:2]\n\n n = sum(conf_matrix.sum())\n phi2 = chi2/n\n r, k = conf_matrix.shape\n phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))\n rcorr = r - ((r-1)**2)/(n-1)\n kcorr = k - ((k-1)**2)/(n-1)\n result = np.sqrt(phi2corr / min((kcorr-1), (rcorr-1)))\n return round(result, 6), round(p, 6)\n```", "_____no_output_____" ], [ "## Descriptive Statistical Analysis", "_____no_output_____" ], [ "Although the insights gained above are significant, it's clear we need more work. \n\nSince we are exploring the data, performing some common and useful descriptive statistical analysis would be nice. However, there are a lot of them and would require a lot of work to do them by scratch. Fortunately, `pandas` library has a neat method that computes all of them for us.\n\nThe `describe()` method, when invoked on a dataframe automatically computes basic statistics for all continuous variables. Do note that any NaN values are automatically skipped in these statistics. By default, it will show stats for numerical data.\n\nHere's what it will show:\n* Count of that variable\n* Mean\n* Standard Deviation (std) \n* Minimum Value\n* IQR (Interquartile Range: 25%, 50% and 75%)\n* Maximum Value\n\nIf you want, you can change the percentiles too. Check out the docs for that. \n\nHere's how to do it in our dataframe:", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "To get the information about categorical variables, we need to specifically tell it to pandas to include them. \n\nFor categorical variables, it shows:\n* Count\n* Unique values\n* The most common value or 'top'\n* Frequency of the 'top'", "_____no_output_____" ] ], [ [ "df.describe(include=['object'])", "_____no_output_____" ] ], [ [ "### Value Counts\n\nSometimes, we need to understand the distribution of the categorical data. This could mean understanding how many units of each characteristic/variable we have. `value_counts()` is a method in pandas that can help with it. If we use it with a series, it will give us the unique values and how many of them exist.\n\n_Caution:_ Using it with DataFrame works like count of unique rows by combination of all columns (like in SQL). This may or may not be what you want. For example, using it with drive-wheels and engine-location would give you the number of rows with unique pair of values. 
\n\nHere's an example of doing it with the drive-wheels column.", "_____no_output_____" ] ], [ [ "df['drive-wheels'].value_counts().to_frame()", "_____no_output_____" ] ], [ [ "`.to_frame()` method is added to make it into a dataframe, hence making it look better.\n\nYou can play around and rename the column and index name if you want. \n\nWe can repeat the above process for the variable 'engine-location'.", "_____no_output_____" ] ], [ [ "df['engine-location'].value_counts().to_frame()", "_____no_output_____" ] ], [ [ "Examining the value counts of the engine location would not be a good predictor variable for the price. This is because we only have three cars with a rear engine and 198 with an engine in the front, this result is skewed. Thus, we are not able to draw any conclusions about the engine location.", "_____no_output_____" ], [ "## Grouping\n\nGrouping is a useful technique to explore the data. With grouping, we can split data and apply various transforms. For example, we can find out the mean of different body styles. This would help us to have more insight into whether there's a relationsip between our target variable and the variable we are using grouping on.\n\nAlthough oftenly used on categorical data, grouping can also be used with numerical data by seperating them into categories. For example we might seperate car by prices into affordable and luxury groups.\n\nIn pandas, we can use the `groupby()` method. \n\nLet's try it with the 'drive-wheels' variable. First we will find out how many unique values there are. We do that by `unique()` method.", "_____no_output_____" ] ], [ [ "df['drive-wheels'].unique()", "_____no_output_____" ] ], [ [ "If we want to know, on average, which type of drive wheel is most valuable, we can group \"drive-wheels\" and then average them.", "_____no_output_____" ] ], [ [ "df[['drive-wheels','body-style','price']].groupby(['drive-wheels']).mean()", "_____no_output_____" ] ], [ [ "From our data, it seems rear-wheel drive vehicles are, on average, the most expensive, while 4-wheel and front-wheel are approximately the same in price.\n\nIt's also possible to group with multiple variables. For example, let's group by both 'drive-wheels' and 'body-style'. This groups the dataframe by the unique combinations 'drive-wheels' and 'body-style'.\n\nLet's store it in the variable `grouped_by_wheels_and_body`.", "_____no_output_____" ] ], [ [ "grouped_by_wheels_and_body = df[['drive-wheels','body-style','price']].groupby(['drive-wheels','body-style']).mean()\ngrouped_by_wheels_and_body", "_____no_output_____" ] ], [ [ "Although incredibly useful, it's a little hard to read. It's better to convert it to a pivot table.\n\nA pivot table is like an Excel spreadsheet, with one variable along the column and another along the row. There are various ways to do so. A way to do that is to use the method `pivot()`. However, with groups like the one above (multi-index), one can simply call the `unstack()` method. ", "_____no_output_____" ] ], [ [ "grouped_by_wheels_and_body = grouped_by_wheels_and_body.unstack()\ngrouped_by_wheels_and_body", "_____no_output_____" ] ], [ [ "Often, we won't have data for some of the pivot cells. Often, it's filled with the value 0, but any other value could potentially be used as well. 
This could be mean or some other flag.", "_____no_output_____" ] ], [ [ "grouped_by_wheels_and_body.fillna(0)", "_____no_output_____" ] ], [ [ "Let's do the same for body-style only", "_____no_output_____" ] ], [ [ "df[['price', 'body-style']].groupby('body-style').mean()", "_____no_output_____" ] ], [ [ "### Visualizing Groups\n\nHeatmaps are a great way to visualize groups. They can show relationships clearly in this case. \n\nDo note that you need to be careful with the color schemes. Since chosing appropriate colorscheme is not only appropriate for your 'story' of the data, it is also important since it can impact the perception of the data. \n\n[This resource](https://matplotlib.org/tutorials/colors/colormaps.html) gives a great idea on what to choose as a color scheme and when it's appropriate. It also has samples of the scheme below too for a quick preview along with when should one use them.\n\nHere's an example of using it with the pivot table we created with the `seaborn` package.", "_____no_output_____" ] ], [ [ "sns.heatmap(grouped_by_wheels_and_body, cmap=\"Blues\");", "_____no_output_____" ] ], [ [ "This heatmap plots the target variable (price) proportional to colour with respect to the variables 'drive-wheel' and 'body-style' in the vertical and horizontal axis respectively. This allows us to visualize how the price is related to 'drive-wheel' and 'body-style'.", "_____no_output_____" ], [ "## Correlation and Causation", "_____no_output_____" ], [ "Correlation and causation are terms that are used often and confused with each other--or worst considered to imply the other. Here's a quick overview of them: \n\n__Correlation__: The degree of association (or resemblance) of variables with each other.\n\n__Causation__: A relationship of cause and effect between variables.\n\nIt is important to know the difference between these two.\n\nNote that correlation does __not__ imply causation. \n\nDetermining correlation is much simpler. We can almost always use methods such as Pearson Correlation, ANOVA method, and graphs. Determining causation may require independent experimentation.", "_____no_output_____" ], [ "### Pearson Correlation\n\nDescribed earlier, Pearson Correlation is great way to measure linear dependence between two variables. It's also the default method in the method corr.\n", "_____no_output_____" ] ], [ [ "df.corr()", "_____no_output_____" ] ], [ [ "### Cramer's V\n\nCramer's V is a great method to calculate the relationship between two categorical variables. Read above about Cramer's V to get a better estimate.\n\n**General Rule of Thumb:**\n* V ∈ [0.1,0.3]: weak association\n* V ∈ [0.4,0.5]: medium association\n* V > 0.5: strong association", "_____no_output_____" ], [ "### ANOVA Method\n\nAs discussed previously, ANOVA method is great to conduct analysis to determine whether there's a significant realtionship between categorical and continous variables. Check out the ANOVA section above for more details.", "_____no_output_____" ], [ "Now, just knowing the correlation statistics is not enough. We also need to know whether the relationship is statistically significant or not. We can use p-value for that.", "_____no_output_____" ], [ "### P-value\n\nIn very simple terms, p-value checks the probability whether the result we have could be just a random chance. 
For example, for a p-value of 0.05, we are certain that our results are insignificant about 5% of time and are significant 95% of the time.\n\nIt's recommended to define a tolerance level of the p-value beforehand. Here's some common interpretations of p-value:\n\n* The p-value is $<$ 0.001: A strong evidence that the correlation is significant.\n* The p-value is $<$ 0.05: A moderate evidence that the correlation is significant.\n* The p-value is $<$ 0.1: A weak evidence that the correlation is significant.\n* The p-value is $>$ 0.1: No evidence that the correlation is significant.", "_____no_output_____" ], [ "We can obtain this information using `stats` module in the `scipy` library.", "_____no_output_____" ], [ "Let's calculate it for wheel-base vs price", "_____no_output_____" ] ], [ [ "pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'])\nprint(\"The Pearson Correlation Coefficient is\", pearson_coef, \" with a P-value of P =\", p_value) ", "The Pearson Correlation Coefficient is 0.5846418222655081 with a P-value of P = 8.076488270732989e-20\n" ] ], [ [ "Since the p-value is $<$ 0.001, the correlation between wheel-base and price is statistically significant, although the linear relationship isn't extremely strong (~0.585)\n\nLet's try one more example: horsepower vs price.", "_____no_output_____" ] ], [ [ "pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price'])\nprint(\"The Pearson Correlation Coefficient is\", pearson_coef, \" with a P-value of P = \", p_value) ", "The Pearson Correlation Coefficient is 0.809574567003656 with a P-value of P = 6.369057428259557e-48\n" ] ], [ [ "Since the p-value is $<$ 0.001, the correlation between horsepower and price is statistically significant, and the linear relationship is quite strong (~0.809, close to 1).\n", "_____no_output_____" ], [ "### Conclusion: Important Variables\n\nWe now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. Some more analysis later, we can find that the important variables are: \n\nContinuous numerical variables:\n* Length\n* Width\n* Curb-weight\n* Engine-size\n* Horsepower\n* City-mpg\n* Highway-mpg\n* Wheel-base\n* Bore\n \nCategorical variables:\n* Drive-wheels\n\nIf needed, we can now mone onto into building machine learning models as we now know what to feed our model.\n\nP.S. [This medium article](https://medium.com/@outside2SDs/an-overview-of-correlation-measures-between-categorical-and-continuous-variables-4c7f85610365#:~:text=A%20simple%20approach%20could%20be,variance%20of%20the%20continuous%20variable.&text=If%20the%20variables%20have%20no,similar%20to%20the%20original%20variance) is a great resource that talks about various ways of correlation between categorical and continous variables. ", "_____no_output_____" ], [ "## Author\nBy Abhinav Garg", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d0589d3997b17db116fc1acd568dc4fe4b2affbd
680,801
ipynb
Jupyter Notebook
MNIST_Recognizer.ipynb
ColdBacon/Digit-recognizer
af039cf16eff02595cd9806cbc4e0ee314970be6
[ "MIT" ]
null
null
null
MNIST_Recognizer.ipynb
ColdBacon/Digit-recognizer
af039cf16eff02595cd9806cbc4e0ee314970be6
[ "MIT" ]
null
null
null
MNIST_Recognizer.ipynb
ColdBacon/Digit-recognizer
af039cf16eff02595cd9806cbc4e0ee314970be6
[ "MIT" ]
null
null
null
268.666535
19,132
0.882202
[ [ [ "#### Bogumiła Walkowiak [email protected]\n\n#### Joachim Mąkowski [email protected]\n\n# Intelligent Systems: Reasoning and Recognition\n## Recognizing Digits using Neural Networks", "_____no_output_____" ], [ "## 1. Introduction\n\n<font size=4>The MNIST (Modified National Institute of Standards and Technology) dataset is a large collection of handwritten digits composed of 60,000 training images and 10,000 test images. The black and white images from NIST were normalized to fit into a 28x28 pixel bounding box and anti-aliased, which introduced gray-scale levels. Our task was to design and evaluate neural network architectures that can recognize hand-drawn digits using the grayscale this data set.", "_____no_output_____" ], [ "## 2. Data preparation\n\n<font size=4>First of all, we downloaded MNIST data. We decided to combine train and test set provided by MNIST dataset and then we splited data into training set 90% and a test set 10%. In the further part of the project, we'll also create a validation set so the final split of the data will look like this: training data 80%, validating data 10% and testing data 10%.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow.compat.v1.keras.backend as K\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom tensorflow.keras import layers\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score,roc_curve,auc\n\nfrom sklearn.metrics import confusion_matrix, plot_confusion_matrix\n#physical_devices = tf.config.list_physical_devices('GPU') \n#tf.config.experimental.set_memory_growth(physical_devices[0], True)", "_____no_output_____" ], [ "(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() #loading the data set", "_____no_output_____" ], [ "X = np.concatenate((x_train, x_test)) \ny = np.concatenate([y_train, y_test])", "_____no_output_____" ], [ "train_ratio = 0.9 \ntest_ratio = 0.1\n\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size = test_ratio)", "_____no_output_____" ], [ "plt.imshow(x_train[0], cmap='gray')", "_____no_output_____" ], [ "x_train = x_train.astype(\"float32\") / 255\nx_test = x_test.astype(\"float32\") / 255\n\n# images have shape (28, 28, 1)\nx_train = np.expand_dims(x_train, -1)\nx_test = np.expand_dims(x_test, -1)", "_____no_output_____" ], [ "y_train = keras.utils.to_categorical(y_train, 10)\ny_test = keras.utils.to_categorical(y_test, 10)", "_____no_output_____" ] ], [ [ "## 3. Creating neural networks", "_____no_output_____" ], [ "<font size=4>We decided to create a function. Thanks to it we will be able to write less code. 
It trains the model passed as its argument, prints the model's loss, accuracy, precision, recall and AUC for each digit, and plots the training history.", "_____no_output_____" ] ], [ [ "def predict_model(model, callbacks = [],batch_size=128, epochs = 4,lr=0.001):\n    # compile, train and evaluate the given model, then report test metrics and plot learning curves\n    adam = keras.optimizers.Adam(lr=lr)\n    model.compile(loss=\"categorical_crossentropy\", optimizer=adam, metrics=[\"accuracy\", \"Precision\",\"Recall\"])\n\n    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.11, callbacks=callbacks)\n    score = model.evaluate(x_test, y_test, verbose=0)\n    y_pred = model.predict(x_test)\n    print(\"Test loss:\", score[0])\n    print(\"Test accuracy:\", score[1])\n    print(\"Test precision:\", score[2])\n    print(\"Test recall:\", score[3])\n    \n    y_pred = np.argmax(y_pred,axis=1)\n    y_test1 = np.argmax(y_test,axis=1)\n    \n    print(\"Test f1 score:\", f1_score(y_test1,y_pred,average='micro'))\n    # per-digit AUC: treat each digit as a one-vs-rest binary problem\n    for i in range(10):\n        temp_pred = [1 if x==i else 0 for x in y_pred]\n        temp_test = [1 if x==i else 0 for x in y_test1]\n        fpr, tpr, thresholds =roc_curve(temp_test,temp_pred)\n\n        print(\"Test AUC for digit:\",i, auc(fpr, tpr))\n    \n    # summarize history for accuracy\n    plt.plot(history.history['accuracy'])\n    plt.plot(history.history['val_accuracy'])\n    plt.title('model accuracy')\n    plt.ylabel('accuracy')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'val'], loc='upper left')\n    plt.show()\n    # summarize history for loss\n    plt.plot(history.history['loss'])\n    plt.plot(history.history['val_loss'])\n    plt.title('model loss')\n    plt.ylabel('loss')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'val'], loc='upper left')\n    plt.show()", "_____no_output_____" ] ], [ [ "<font size=4>We added an instance of the EarlyStopping class, which provides a mechanism for stopping the training before the whole process is done. 
When 3 epochs are not achieving a better result (in our example higher validation accuracy) then our training is stopped and we restore the best model.", "_____no_output_____" ] ], [ [ "# simple early stopping\nes = keras.callbacks.EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience = 3, restore_best_weights=True)", "_____no_output_____" ] ], [ [ "### Basic Fully Connected Multi-layer Network", "_____no_output_____" ], [ "<font size=4>The first network we have created is basic fully connected mutli-layer network:", "_____no_output_____" ] ], [ [ "model_fc = keras.Sequential([\n layers.Dense(32, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(64, activation=\"relu\"),\n \n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc.summary()\npredict_model(model_fc, [es], epochs=100)", "Model: \"sequential_20\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_59 (Dense) (None, 28, 28, 32) 64 \n_________________________________________________________________\ndense_60 (Dense) (None, 28, 28, 64) 2112 \n_________________________________________________________________\nflatten_20 (Flatten) (None, 50176) 0 \n_________________________________________________________________\ndense_61 (Dense) (None, 128) 6422656 \n_________________________________________________________________\ndropout_19 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_62 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 6,426,122\nTrainable params: 6,426,122\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 6s 13ms/step - loss: 0.5066 - accuracy: 0.8487 - precision: 0.9149 - recall: 0.7786 - val_loss: 0.1463 - val_accuracy: 0.9586 - val_precision: 0.9667 - val_recall: 0.9504\nEpoch 2/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1643 - accuracy: 0.9510 - precision: 0.9601 - recall: 0.9434 - val_loss: 0.1087 - val_accuracy: 0.9683 - val_precision: 0.9707 - val_recall: 0.9661\nEpoch 3/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1254 - accuracy: 0.9611 - precision: 0.9680 - recall: 0.9554 - val_loss: 0.0970 - val_accuracy: 0.9729 - val_precision: 0.9770 - val_recall: 0.9698\nEpoch 4/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1044 - accuracy: 0.9666 - precision: 0.9716 - recall: 0.9629 - val_loss: 0.0932 - val_accuracy: 0.9736 - val_precision: 0.9756 - val_recall: 0.9711\nEpoch 5/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.0893 - accuracy: 0.9704 - precision: 0.9740 - recall: 0.9670 - val_loss: 0.0962 - val_accuracy: 0.9720 - val_precision: 0.9748 - val_recall: 0.9710\nEpoch 6/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.0824 - accuracy: 0.9726 - precision: 0.9757 - recall: 0.9695 - val_loss: 0.0853 - val_accuracy: 0.9773 - val_precision: 0.9801 - val_recall: 0.9746\nEpoch 7/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.0750 - accuracy: 0.9763 - precision: 0.9789 - recall: 0.9733 - val_loss: 0.0796 - val_accuracy: 0.9769 - val_precision: 0.9796 - val_recall: 0.9755\nEpoch 8/100\n439/439 [==============================] - 5s 12ms/step - 
loss: 0.0664 - accuracy: 0.9776 - precision: 0.9805 - recall: 0.9752 - val_loss: 0.0869 - val_accuracy: 0.9766 - val_precision: 0.9787 - val_recall: 0.9755\nEpoch 9/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.0621 - accuracy: 0.9788 - precision: 0.9806 - recall: 0.9770 - val_loss: 0.0832 - val_accuracy: 0.9766 - val_precision: 0.9783 - val_recall: 0.9758\nRestoring model weights from the end of the best epoch.\nEpoch 00009: early stopping\nTest loss: 0.09832347929477692\nTest accuracy: 0.973714292049408\nTest precision: 0.9774295687675476\nTest recall: 0.9712857007980347\nTest f1 score: 0.9737142857142858\nTest AUC for digit: 0 0.9907399351644153\nTest AUC for digit: 1 0.9923331284991935\nTest AUC for digit: 2 0.9870421148228989\nTest AUC for digit: 3 0.9851853814963031\nTest AUC for digit: 4 0.9845406523282492\nTest AUC for digit: 5 0.9781305833322657\nTest AUC for digit: 6 0.9906349206349208\nTest AUC for digit: 7 0.9875151552516612\nTest AUC for digit: 8 0.974920634920635\nTest AUC for digit: 9 0.9815057646170433\n" ] ], [ [ "<font size=4>This is basic model achieves about 97,5% accuracy on test set. It is made of 2 hidden layers with reasonable number of units. Training this model is quite fast (on my laptop it was 5s per epoch, using GPU).\nAs we see in plots our model started to overfits, because validation accuracy and loss was staying on the same level, while train accuracy was growing and loss was decreasing.", "_____no_output_____" ], [ "<font size=4>Next, we wanted to demonstrate the effect of changing various parameters of the network.", "_____no_output_____" ], [ "### Different number of layers", "_____no_output_____" ] ], [ [ "model_fc_small = keras.Sequential([\n layers.Dense(32, activation=\"relu\",input_shape=(28,28,1)),\n \n layers.Flatten(),\n layers.Dense(64, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc_small.summary()\npredict_model(model_fc_small, [es], epochs=100)", "Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_4 (Dense) (None, 28, 28, 32) 64 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 25088) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 64) 1605696 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_6 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 1,606,410\nTrainable params: 1,606,410\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 11s 22ms/step - loss: 0.5918 - accuracy: 0.8171 - precision: 0.9070 - recall: 0.7388 - val_loss: 0.1764 - val_accuracy: 0.9483 - val_precision: 0.9603 - val_recall: 0.9381\nEpoch 2/100\n439/439 [==============================] - 9s 21ms/step - loss: 0.2418 - accuracy: 0.9282 - precision: 0.9433 - recall: 0.9133 - val_loss: 0.1424 - val_accuracy: 0.9557 - val_precision: 0.9640 - val_recall: 0.9494\nEpoch 3/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1958 - accuracy: 0.9404 - precision: 0.9512 - recall: 0.9299 - val_loss: 0.1180 - val_accuracy: 0.9649 - val_precision: 0.9711 - val_recall: 
0.9587\nEpoch 4/100\n439/439 [==============================] - 9s 21ms/step - loss: 0.1628 - accuracy: 0.9478 - precision: 0.9577 - recall: 0.9405 - val_loss: 0.1205 - val_accuracy: 0.9635 - val_precision: 0.9701 - val_recall: 0.9589\nEpoch 5/100\n439/439 [==============================] - 9s 22ms/step - loss: 0.1513 - accuracy: 0.9516 - precision: 0.9591 - recall: 0.9447 - val_loss: 0.1079 - val_accuracy: 0.9680 - val_precision: 0.9741 - val_recall: 0.9644\nEpoch 6/100\n439/439 [==============================] - 9s 22ms/step - loss: 0.1431 - accuracy: 0.9555 - precision: 0.9634 - recall: 0.9489 - val_loss: 0.1074 - val_accuracy: 0.9690 - val_precision: 0.9747 - val_recall: 0.9674\nEpoch 7/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1351 - accuracy: 0.9565 - precision: 0.9637 - recall: 0.9509 - val_loss: 0.1093 - val_accuracy: 0.9694 - val_precision: 0.9741 - val_recall: 0.9646\nEpoch 8/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1319 - accuracy: 0.9587 - precision: 0.9651 - recall: 0.9528 - val_loss: 0.1047 - val_accuracy: 0.9703 - val_precision: 0.9741 - val_recall: 0.9674\nEpoch 9/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1188 - accuracy: 0.9614 - precision: 0.9674 - recall: 0.9562 - val_loss: 0.1044 - val_accuracy: 0.9706 - val_precision: 0.9747 - val_recall: 0.9670\nEpoch 10/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1194 - accuracy: 0.9610 - precision: 0.9666 - recall: 0.9566 - val_loss: 0.1080 - val_accuracy: 0.9724 - val_precision: 0.9749 - val_recall: 0.9697\nEpoch 11/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1116 - accuracy: 0.9636 - precision: 0.9691 - recall: 0.9591 - val_loss: 0.1011 - val_accuracy: 0.9732 - val_precision: 0.9760 - val_recall: 0.9693\nEpoch 12/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1030 - accuracy: 0.9653 - precision: 0.9705 - recall: 0.9611 - val_loss: 0.1011 - val_accuracy: 0.9722 - val_precision: 0.9743 - val_recall: 0.9698\nEpoch 13/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1039 - accuracy: 0.9663 - precision: 0.9707 - recall: 0.9623 - val_loss: 0.1028 - val_accuracy: 0.9729 - val_precision: 0.9764 - val_recall: 0.9713\nEpoch 14/100\n439/439 [==============================] - 10s 22ms/step - loss: 0.1029 - accuracy: 0.9668 - precision: 0.9710 - recall: 0.9625 - val_loss: 0.0977 - val_accuracy: 0.9730 - val_precision: 0.9772 - val_recall: 0.9717\nEpoch 00014: early stopping\nTest loss: 0.1229480430483818\nTest accuracy: 0.9679999947547913\nTest precision: 0.9723661541938782\nTest recall: 0.9651428461074829\nTest f1 score: 0.968\nTest AUC for digit: 0 0.9900292592326402\nTest AUC for digit: 1 0.9906050941195285\nTest AUC for digit: 2 0.9857744868717027\nTest AUC for digit: 3 0.9805160602125461\nTest AUC for digit: 4 0.981489857731353\nTest AUC for digit: 5 0.9728804648288869\nTest AUC for digit: 6 0.9881746031746033\nTest AUC for digit: 7 0.9815794883823452\nTest AUC for digit: 8 0.9792063492063492\nTest AUC for digit: 9 0.9701437036094706\n" ], [ "model_fc_large = keras.Sequential([\n layers.Dense(32, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(64, activation=\"relu\"),\n layers.Flatten(),\n layers.Dense(4096, activation=\"relu\"),\n layers.Dense(1024, activation=\"relu\"),\n layers.Dense(64, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) 
\nmodel_fc_large.summary()\npredict_model(model_fc_large, [es], epochs=100)", "Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_7 (Dense) (None, 28, 28, 32) 64 \n_________________________________________________________________\ndense_8 (Dense) (None, 28, 28, 64) 2112 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 50176) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 4096) 205524992 \n_________________________________________________________________\ndense_10 (Dense) (None, 1024) 4195328 \n_________________________________________________________________\ndense_11 (Dense) (None, 64) 65600 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_12 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 209,788,746\nTrainable params: 209,788,746\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 55s 120ms/step - loss: 0.5493 - accuracy: 0.8354 - precision: 0.9044 - recall: 0.7825 - val_loss: 0.1236 - val_accuracy: 0.9652 - val_precision: 0.9722 - val_recall: 0.9605\nEpoch 2/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.1123 - accuracy: 0.9679 - precision: 0.9741 - recall: 0.9620 - val_loss: 0.0866 - val_accuracy: 0.9762 - val_precision: 0.9798 - val_recall: 0.9717\nEpoch 3/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0735 - accuracy: 0.9787 - precision: 0.9823 - recall: 0.9749 - val_loss: 0.0845 - val_accuracy: 0.9747 - val_precision: 0.9812 - val_recall: 0.9709\nEpoch 4/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0509 - accuracy: 0.9847 - precision: 0.9870 - recall: 0.9826 - val_loss: 0.0801 - val_accuracy: 0.9778 - val_precision: 0.9823 - val_recall: 0.9752\nEpoch 5/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0464 - accuracy: 0.9860 - precision: 0.9879 - recall: 0.9842 - val_loss: 0.0887 - val_accuracy: 0.9781 - val_precision: 0.9806 - val_recall: 0.9762\nEpoch 6/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0356 - accuracy: 0.9898 - precision: 0.9909 - recall: 0.9882 - val_loss: 0.0897 - val_accuracy: 0.9804 - val_precision: 0.9828 - val_recall: 0.9785\nEpoch 7/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0276 - accuracy: 0.9921 - precision: 0.9929 - recall: 0.9913 - val_loss: 0.0870 - val_accuracy: 0.9821 - val_precision: 0.9832 - val_recall: 0.9811\nEpoch 8/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0234 - accuracy: 0.9928 - precision: 0.9935 - recall: 0.9920 - val_loss: 0.0820 - val_accuracy: 0.9804 - val_precision: 0.9823 - val_recall: 0.9776\nEpoch 9/100\n439/439 [==============================] - 33s 75ms/step - loss: 0.0215 - accuracy: 0.9939 - precision: 0.9947 - recall: 0.9931 - val_loss: 0.1025 - val_accuracy: 0.9801 - val_precision: 0.9812 - val_recall: 0.9786\nEpoch 10/100\n439/439 [==============================] - 32s 73ms/step - loss: 0.0220 - accuracy: 0.9939 - precision: 0.9943 - recall: 0.9933 - val_loss: 0.0775 - val_accuracy: 0.9828 - val_precision: 0.9849 - val_recall: 
0.9821\nEpoch 11/100\n439/439 [==============================] - 34s 77ms/step - loss: 0.0177 - accuracy: 0.9949 - precision: 0.9953 - recall: 0.9944 - val_loss: 0.0929 - val_accuracy: 0.9843 - val_precision: 0.9850 - val_recall: 0.9831\nEpoch 12/100\n439/439 [==============================] - 34s 79ms/step - loss: 0.0138 - accuracy: 0.9958 - precision: 0.9962 - recall: 0.9955 - val_loss: 0.0812 - val_accuracy: 0.9815 - val_precision: 0.9828 - val_recall: 0.9808\nEpoch 13/100\n439/439 [==============================] - 34s 77ms/step - loss: 0.0136 - accuracy: 0.9963 - precision: 0.9965 - recall: 0.9960 - val_loss: 0.0883 - val_accuracy: 0.9824 - val_precision: 0.9834 - val_recall: 0.9818\nEpoch 14/100\n439/439 [==============================] - 34s 77ms/step - loss: 0.0117 - accuracy: 0.9965 - precision: 0.9967 - recall: 0.9963 - val_loss: 0.0809 - val_accuracy: 0.9844 - val_precision: 0.9858 - val_recall: 0.9844\nEpoch 15/100\n439/439 [==============================] - 34s 78ms/step - loss: 0.0107 - accuracy: 0.9971 - precision: 0.9972 - recall: 0.9968 - val_loss: 0.1161 - val_accuracy: 0.9805 - val_precision: 0.9811 - val_recall: 0.9798\nEpoch 16/100\n439/439 [==============================] - 33s 74ms/step - loss: 0.0152 - accuracy: 0.9959 - precision: 0.9960 - recall: 0.9955 - val_loss: 0.1062 - val_accuracy: 0.9815 - val_precision: 0.9827 - val_recall: 0.9814\nEpoch 17/100\n439/439 [==============================] - 34s 78ms/step - loss: 0.0097 - accuracy: 0.9971 - precision: 0.9973 - recall: 0.9970 - val_loss: 0.0868 - val_accuracy: 0.9830 - val_precision: 0.9841 - val_recall: 0.9824\nEpoch 00017: early stopping\nTest loss: 0.09394106268882751\nTest accuracy: 0.9814285635948181\nTest precision: 0.9819768071174622\nTest recall: 0.9807142615318298\nTest f1 score: 0.9814285714285714\nTest AUC for digit: 0 0.9988155401137082\nTest AUC for digit: 1 0.9947057004231706\nTest AUC for digit: 2 0.9863037767355775\nTest AUC for digit: 3 0.9898547027800599\nTest AUC for digit: 4 0.9875124205303294\nTest AUC for digit: 5 0.9887902209336588\nTest AUC for digit: 6 0.9899999999999999\nTest AUC for digit: 7 0.9909183219141172\nTest AUC for digit: 8 0.9815079365079366\nTest AUC for digit: 9 0.987926026320382\n" ] ], [ [ "<font size=4>Firstly, we tried different numbers of hidden layers. With 1 hidden layer the model the model was achieving around 96,5% on test set. The model is underfitted because this number of layers is not enough to explain the complexity of our data.\n\nModel with 4 hidden layers achieved 98,1% of accuracy but the training time was pretty long (34s per epoch). That is because this model had to find weights for over 200,000,000 parameters (compering to 1,600,000 of params for model with 1 hidden layer). 
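As a sanity check, almost all of those parameters sit in the first dense layer after flattening: the 28x28x64 activation flattens to 50,176 values, and connecting them to 4,096 units costs 50,176 * 4,096 + 4,096 = 205,524,992 weights and biases, which matches the layer-by-layer summary printed above.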
We can assume, that after second epoch our model is overfitted because the difference between validation and train loss and accuracy are high.", "_____no_output_____" ], [ "### Different number of units per layer", "_____no_output_____" ] ], [ [ "model_fc = keras.Sequential([\n layers.Dense(10, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(20, activation=\"relu\"),\n \n layers.Flatten(),\n layers.Dense(40, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc.summary()\npredict_model(model_fc, [es], epochs=100)", "Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_13 (Dense) (None, 28, 28, 10) 20 \n_________________________________________________________________\ndense_14 (Dense) (None, 28, 28, 20) 220 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 15680) 0 \n_________________________________________________________________\ndense_15 (Dense) (None, 40) 627240 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 40) 0 \n_________________________________________________________________\ndense_16 (Dense) (None, 10) 410 \n=================================================================\nTotal params: 627,890\nTrainable params: 627,890\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 4s 8ms/step - loss: 0.6942 - accuracy: 0.7885 - precision: 0.8959 - recall: 0.6762 - val_loss: 0.2011 - val_accuracy: 0.9401 - val_precision: 0.9534 - val_recall: 0.9297\nEpoch 2/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2938 - accuracy: 0.9101 - precision: 0.9330 - recall: 0.8907 - val_loss: 0.1577 - val_accuracy: 0.9567 - val_precision: 0.9660 - val_recall: 0.9476\nEpoch 3/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2441 - accuracy: 0.9252 - precision: 0.9418 - recall: 0.9100 - val_loss: 0.1455 - val_accuracy: 0.9571 - val_precision: 0.9666 - val_recall: 0.9519\nEpoch 4/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2146 - accuracy: 0.9344 - precision: 0.9488 - recall: 0.9216 - val_loss: 0.1342 - val_accuracy: 0.9613 - val_precision: 0.9710 - val_recall: 0.9534\nEpoch 5/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2027 - accuracy: 0.9370 - precision: 0.9503 - recall: 0.9245 - val_loss: 0.1228 - val_accuracy: 0.9639 - val_precision: 0.9711 - val_recall: 0.9589\nEpoch 6/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1886 - accuracy: 0.9395 - precision: 0.9515 - recall: 0.9284 - val_loss: 0.1218 - val_accuracy: 0.9613 - val_precision: 0.9689 - val_recall: 0.9582\nEpoch 7/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.1840 - accuracy: 0.9408 - precision: 0.9528 - recall: 0.9316 - val_loss: 0.1211 - val_accuracy: 0.9649 - val_precision: 0.9713 - val_recall: 0.9612\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1801 - accuracy: 0.9424 - precision: 0.9536 - recall: 0.9319 - val_loss: 0.1177 - val_accuracy: 0.9646 - val_precision: 0.9717 - val_recall: 0.9613\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1690 - accuracy: 0.9457 - precision: 0.9563 - recall: 0.9362 - val_loss: 0.1225 - val_accuracy: 0.9657 - 
val_precision: 0.9714 - val_recall: 0.9615\nEpoch 10/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1685 - accuracy: 0.9465 - precision: 0.9569 - recall: 0.9375 - val_loss: 0.1151 - val_accuracy: 0.9675 - val_precision: 0.9729 - val_recall: 0.9644\nEpoch 11/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1615 - accuracy: 0.9484 - precision: 0.9589 - recall: 0.9397 - val_loss: 0.1208 - val_accuracy: 0.9658 - val_precision: 0.9719 - val_recall: 0.9596\nEpoch 12/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1573 - accuracy: 0.9494 - precision: 0.9584 - recall: 0.9422 - val_loss: 0.1155 - val_accuracy: 0.9685 - val_precision: 0.9716 - val_recall: 0.9641\nEpoch 13/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1518 - accuracy: 0.9498 - precision: 0.9592 - recall: 0.9414 - val_loss: 0.1165 - val_accuracy: 0.9658 - val_precision: 0.9719 - val_recall: 0.9616\nEpoch 14/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1459 - accuracy: 0.9534 - precision: 0.9620 - recall: 0.9461 - val_loss: 0.1136 - val_accuracy: 0.9670 - val_precision: 0.9723 - val_recall: 0.9641\nEpoch 15/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1485 - accuracy: 0.9509 - precision: 0.9603 - recall: 0.9437 - val_loss: 0.1178 - val_accuracy: 0.9667 - val_precision: 0.9704 - val_recall: 0.9641\nEpoch 00015: early stopping\nTest loss: 0.12980997562408447\nTest accuracy: 0.9620000123977661\nTest precision: 0.9678164124488831\nTest recall: 0.9580000042915344\nTest f1 score: 0.962\nTest AUC for digit: 0 0.9925116601919346\nTest AUC for digit: 1 0.9898799893821776\nTest AUC for digit: 2 0.9862624960357879\nTest AUC for digit: 3 0.9697247767206424\nTest AUC for digit: 4 0.9822645042790503\nTest AUC for digit: 5 0.9736597292505758\nTest AUC for digit: 6 0.9884920634920635\nTest AUC for digit: 7 0.9744171471575807\nTest AUC for digit: 8 0.9637301587301589\nTest AUC for digit: 9 0.9673563983253335\n" ] ], [ [ "<font size=4>In this situation we trained a model with small number of units in each layer. The model didn't achieve it's best. We can see, that train accuracy is much lower than validation accuracy. 
It is caused by insufficient number of units, so that our model decided to choose higher accuracy in validation data at the expense of accuracy on whole data.", "_____no_output_____" ] ], [ [ "model_fc = keras.Sequential([\n layers.Dense(100, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(200, activation=\"relu\"),\n \n layers.Flatten(),\n layers.Dense(400, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc.summary()\npredict_model(model_fc, [es], epochs=100)", "Model: \"sequential_4\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_17 (Dense) (None, 28, 28, 100) 200 \n_________________________________________________________________\ndense_18 (Dense) (None, 28, 28, 200) 20200 \n_________________________________________________________________\nflatten_4 (Flatten) (None, 156800) 0 \n_________________________________________________________________\ndense_19 (Dense) (None, 400) 62720400 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 400) 0 \n_________________________________________________________________\ndense_20 (Dense) (None, 10) 4010 \n=================================================================\nTotal params: 62,744,810\nTrainable params: 62,744,810\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 20s 45ms/step - loss: 0.3810 - accuracy: 0.8801 - precision: 0.9206 - recall: 0.8528 - val_loss: 0.1177 - val_accuracy: 0.9678 - val_precision: 0.9722 - val_recall: 0.9632\nEpoch 2/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.1207 - accuracy: 0.9633 - precision: 0.9690 - recall: 0.9591 - val_loss: 0.0924 - val_accuracy: 0.9750 - val_precision: 0.9782 - val_recall: 0.9726\nEpoch 3/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0939 - accuracy: 0.9698 - precision: 0.9735 - recall: 0.9668 - val_loss: 0.0772 - val_accuracy: 0.9772 - val_precision: 0.9801 - val_recall: 0.9756\nEpoch 4/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0664 - accuracy: 0.9785 - precision: 0.9809 - recall: 0.9767 - val_loss: 0.0717 - val_accuracy: 0.9804 - val_precision: 0.9828 - val_recall: 0.9789\nEpoch 5/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0588 - accuracy: 0.9814 - precision: 0.9831 - recall: 0.9800 - val_loss: 0.0762 - val_accuracy: 0.9789 - val_precision: 0.9824 - val_recall: 0.9771\nEpoch 6/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0463 - accuracy: 0.9847 - precision: 0.9860 - recall: 0.9832 - val_loss: 0.0695 - val_accuracy: 0.9808 - val_precision: 0.9831 - val_recall: 0.9798\nEpoch 7/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0436 - accuracy: 0.9851 - precision: 0.9862 - recall: 0.9839 - val_loss: 0.0762 - val_accuracy: 0.9797 - val_precision: 0.9813 - val_recall: 0.9784\nEpoch 8/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0367 - accuracy: 0.9878 - precision: 0.9884 - recall: 0.9864 - val_loss: 0.0631 - val_accuracy: 0.9823 - val_precision: 0.9836 - val_recall: 0.9808\nEpoch 9/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0343 - accuracy: 0.9876 - precision: 0.9886 - recall: 0.9868 - val_loss: 0.0663 - val_accuracy: 0.9807 - val_precision: 0.9830 - 
val_recall: 0.9782\nEpoch 10/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0300 - accuracy: 0.9894 - precision: 0.9902 - recall: 0.9889 - val_loss: 0.0815 - val_accuracy: 0.9828 - val_precision: 0.9831 - val_recall: 0.9820\nEpoch 11/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0253 - accuracy: 0.9917 - precision: 0.9922 - recall: 0.9912 - val_loss: 0.0702 - val_accuracy: 0.9834 - val_precision: 0.9847 - val_recall: 0.9830\nEpoch 12/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0224 - accuracy: 0.9931 - precision: 0.9933 - recall: 0.9928 - val_loss: 0.0760 - val_accuracy: 0.9818 - val_precision: 0.9831 - val_recall: 0.9811\nEpoch 13/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0239 - accuracy: 0.9915 - precision: 0.9921 - recall: 0.9910 - val_loss: 0.0783 - val_accuracy: 0.9798 - val_precision: 0.9818 - val_recall: 0.9789\nEpoch 14/100\n439/439 [==============================] - 19s 44ms/step - loss: 0.0202 - accuracy: 0.9927 - precision: 0.9931 - recall: 0.9925 - val_loss: 0.0734 - val_accuracy: 0.9808 - val_precision: 0.9828 - val_recall: 0.9795\nEpoch 00014: early stopping\nTest loss: 0.08533725142478943\nTest accuracy: 0.9795714020729065\nTest precision: 0.9812401533126831\nTest recall: 0.978857159614563\nTest f1 score: 0.9795714285714285\nTest AUC for digit: 0 0.993617156085807\nTest AUC for digit: 1 0.9955623103018043\nTest AUC for digit: 2 0.9869502393935624\nTest AUC for digit: 3 0.9921123419818196\nTest AUC for digit: 4 0.9906739460192185\nTest AUC for digit: 5 0.9834615107924303\nTest AUC for digit: 6 0.993095238095238\nTest AUC for digit: 7 0.990522564434301\nTest AUC for digit: 8 0.976031746031746\nTest AUC for digit: 9 0.9826922857371192\n" ] ], [ [ "<font size=4>In this model we see that it's overfitting after third epoch. 
It is caused by too high number of units.", "_____no_output_____" ], [ "### Different learning rate", "_____no_output_____" ] ], [ [ "model_fc_01 = keras.Sequential([\n layers.Dense(32, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(64, activation=\"relu\"),\n \n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc_01.summary()\npredict_model(model_fc_01,[es], epochs=100, lr=0.05)", "Model: \"sequential_21\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_63 (Dense) (None, 28, 28, 32) 64 \n_________________________________________________________________\ndense_64 (Dense) (None, 28, 28, 64) 2112 \n_________________________________________________________________\nflatten_21 (Flatten) (None, 50176) 0 \n_________________________________________________________________\ndense_65 (Dense) (None, 128) 6422656 \n_________________________________________________________________\ndropout_20 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_66 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 6,426,122\nTrainable params: 6,426,122\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 6s 13ms/step - loss: 2.5641 - accuracy: 0.5516 - precision: 0.7483 - recall: 0.4283 - val_loss: 0.4579 - val_accuracy: 0.8913 - val_precision: 0.9146 - val_recall: 0.8734\nEpoch 2/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.6291 - accuracy: 0.8000 - precision: 0.8749 - recall: 0.7328 - val_loss: 0.2885 - val_accuracy: 0.9248 - val_precision: 0.9536 - val_recall: 0.8866\nEpoch 3/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.5281 - accuracy: 0.8320 - precision: 0.8874 - recall: 0.7802 - val_loss: 0.2769 - val_accuracy: 0.9264 - val_precision: 0.9468 - val_recall: 0.9092\nEpoch 4/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.5307 - accuracy: 0.8348 - precision: 0.8777 - recall: 0.7971 - val_loss: 0.2822 - val_accuracy: 0.9258 - val_precision: 0.9504 - val_recall: 0.8993\nEpoch 5/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.6051 - accuracy: 0.8107 - precision: 0.8614 - recall: 0.7684 - val_loss: 0.2608 - val_accuracy: 0.9299 - val_precision: 0.9458 - val_recall: 0.9134\nEpoch 6/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.4968 - accuracy: 0.8454 - precision: 0.8837 - recall: 0.8143 - val_loss: 0.2928 - val_accuracy: 0.9188 - val_precision: 0.9445 - val_recall: 0.8965\nEpoch 7/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.5996 - accuracy: 0.8170 - precision: 0.8669 - recall: 0.7753 - val_loss: 0.3559 - val_accuracy: 0.9036 - val_precision: 0.9382 - val_recall: 0.8711\nEpoch 8/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.5959 - accuracy: 0.8130 - precision: 0.8761 - recall: 0.7515 - val_loss: 0.3750 - val_accuracy: 0.9023 - val_precision: 0.9415 - val_recall: 0.8524\nRestoring model weights from the end of the best epoch.\nEpoch 00008: early stopping\nTest loss: 0.28543806076049805\nTest accuracy: 0.9227142930030823\nTest precision: 0.942704439163208\nTest recall: 0.9072856903076172\nTest f1 score: 0.9227142857142857\nTest AUC 
for digit: 0 0.9789563324393538\nTest AUC for digit: 1 0.979840545957394\nTest AUC for digit: 2 0.9702686971098221\nTest AUC for digit: 3 0.927925323986961\nTest AUC for digit: 4 0.9539853844616494\nTest AUC for digit: 5 0.9262426101687231\nTest AUC for digit: 6 0.9711111111111111\nTest AUC for digit: 7 0.960725329011793\nTest AUC for digit: 8 0.9488095238095239\nTest AUC for digit: 9 0.9511993241314747\n" ] ], [ [ "<font size=4>We took our first model and decided to train it with different learning rates. With learning rate 0.05 we received very bad results (accuracy around 92%). The scores are so bad because our optimizer did not find good weights, because it had to change values with too big \"jump\".", "_____no_output_____" ] ], [ [ "model_fc_00001 = keras.Sequential([\n layers.Dense(32, activation=\"relu\",input_shape=(28,28,1)),\n layers.Dense(64, activation=\"relu\"),\n \n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dropout(.25),\n layers.Dense(10, activation=\"softmax\")\n]) \nmodel_fc_00001.summary()\npredict_model(model_fc_00001,[es], epochs=100, lr = 0.00001)", "Model: \"sequential_15\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_46 (Dense) (None, 28, 28, 32) 64 \n_________________________________________________________________\ndense_47 (Dense) (None, 28, 28, 64) 2112 \n_________________________________________________________________\nflatten_15 (Flatten) (None, 50176) 0 \n_________________________________________________________________\ndense_48 (Dense) (None, 128) 6422656 \n_________________________________________________________________\ndropout_14 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_49 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 6,426,122\nTrainable params: 6,426,122\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 6s 13ms/step - loss: 1.8177 - accuracy: 0.5851 - precision: 0.7578 - recall: 0.0475 - val_loss: 0.8030 - val_accuracy: 0.8551 - val_precision: 0.9845 - val_recall: 0.5235\nEpoch 2/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.7526 - accuracy: 0.8255 - precision: 0.9614 - recall: 0.5864 - val_loss: 0.5059 - val_accuracy: 0.8835 - val_precision: 0.9580 - val_recall: 0.7645\nEpoch 3/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.5323 - accuracy: 0.8581 - precision: 0.9378 - recall: 0.7558 - val_loss: 0.4048 - val_accuracy: 0.8981 - val_precision: 0.9491 - val_recall: 0.8320\nEpoch 4/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.4437 - accuracy: 0.8764 - precision: 0.9324 - recall: 0.8142 - val_loss: 0.3561 - val_accuracy: 0.9055 - val_precision: 0.9460 - val_recall: 0.8603\nEpoch 5/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.3928 - accuracy: 0.8891 - precision: 0.9336 - recall: 0.8411 - val_loss: 0.3259 - val_accuracy: 0.9117 - val_precision: 0.9436 - val_recall: 0.8758\nEpoch 6/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.3637 - accuracy: 0.8953 - precision: 0.9334 - recall: 0.8587 - val_loss: 0.3050 - val_accuracy: 0.9153 - val_precision: 0.9434 - val_recall: 0.8876\nEpoch 7/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.3479 - 
accuracy: 0.8986 - precision: 0.9315 - recall: 0.8666 - val_loss: 0.2869 - val_accuracy: 0.9190 - val_precision: 0.9459 - val_recall: 0.8964\nEpoch 8/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.3248 - accuracy: 0.9051 - precision: 0.9349 - recall: 0.8769 - val_loss: 0.2723 - val_accuracy: 0.9227 - val_precision: 0.9477 - val_recall: 0.9000\nEpoch 9/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.3000 - accuracy: 0.9150 - precision: 0.9403 - recall: 0.8894 - val_loss: 0.2609 - val_accuracy: 0.9258 - val_precision: 0.9487 - val_recall: 0.9040\nEpoch 10/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2921 - accuracy: 0.9155 - precision: 0.9409 - recall: 0.8923 - val_loss: 0.2493 - val_accuracy: 0.9293 - val_precision: 0.9492 - val_recall: 0.9094\nEpoch 11/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2765 - accuracy: 0.9198 - precision: 0.9424 - recall: 0.8979 - val_loss: 0.2389 - val_accuracy: 0.9328 - val_precision: 0.9516 - val_recall: 0.9134\nEpoch 12/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2636 - accuracy: 0.9249 - precision: 0.9456 - recall: 0.9054 - val_loss: 0.2288 - val_accuracy: 0.9346 - val_precision: 0.9539 - val_recall: 0.9189\nEpoch 13/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2534 - accuracy: 0.9258 - precision: 0.9459 - recall: 0.9079 - val_loss: 0.2207 - val_accuracy: 0.9368 - val_precision: 0.9539 - val_recall: 0.9216\nEpoch 14/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2447 - accuracy: 0.9276 - precision: 0.9473 - recall: 0.9091 - val_loss: 0.2116 - val_accuracy: 0.9410 - val_precision: 0.9564 - val_recall: 0.9264\nEpoch 15/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2372 - accuracy: 0.9315 - precision: 0.9506 - recall: 0.9155 - val_loss: 0.2028 - val_accuracy: 0.9443 - val_precision: 0.9583 - val_recall: 0.9296\nEpoch 16/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2272 - accuracy: 0.9334 - precision: 0.9515 - recall: 0.9177 - val_loss: 0.1975 - val_accuracy: 0.9452 - val_precision: 0.9590 - val_recall: 0.9326\nEpoch 17/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2178 - accuracy: 0.9368 - precision: 0.9532 - recall: 0.9223 - val_loss: 0.1886 - val_accuracy: 0.9482 - val_precision: 0.9601 - val_recall: 0.9369\nEpoch 18/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2111 - accuracy: 0.9389 - precision: 0.9525 - recall: 0.9257 - val_loss: 0.1827 - val_accuracy: 0.9505 - val_precision: 0.9626 - val_recall: 0.9390\nEpoch 19/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.2045 - accuracy: 0.9398 - precision: 0.9540 - recall: 0.9263 - val_loss: 0.1758 - val_accuracy: 0.9521 - val_precision: 0.9638 - val_recall: 0.9418\nEpoch 20/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1990 - accuracy: 0.9418 - precision: 0.9564 - recall: 0.9299 - val_loss: 0.1696 - val_accuracy: 0.9540 - val_precision: 0.9645 - val_recall: 0.9443\nEpoch 21/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1892 - accuracy: 0.9436 - precision: 0.9570 - recall: 0.9323 - val_loss: 0.1645 - val_accuracy: 0.9545 - val_precision: 0.9649 - val_recall: 0.9455\nEpoch 22/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1896 - accuracy: 0.9450 - precision: 0.9588 - recall: 0.9329 - val_loss: 0.1605 - val_accuracy: 0.9551 - val_precision: 0.9655 - 
val_recall: 0.9479\nEpoch 23/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1779 - accuracy: 0.9476 - precision: 0.9589 - recall: 0.9375 - val_loss: 0.1546 - val_accuracy: 0.9557 - val_precision: 0.9679 - val_recall: 0.9489\nEpoch 24/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1749 - accuracy: 0.9487 - precision: 0.9613 - recall: 0.9386 - val_loss: 0.1504 - val_accuracy: 0.9574 - val_precision: 0.9670 - val_recall: 0.9509\nEpoch 25/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1666 - accuracy: 0.9498 - precision: 0.9622 - recall: 0.9410 - val_loss: 0.1468 - val_accuracy: 0.9579 - val_precision: 0.9681 - val_recall: 0.9509\nEpoch 26/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1642 - accuracy: 0.9515 - precision: 0.9622 - recall: 0.9420 - val_loss: 0.1422 - val_accuracy: 0.9602 - val_precision: 0.9699 - val_recall: 0.9534\nEpoch 27/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1584 - accuracy: 0.9539 - precision: 0.9639 - recall: 0.9446 - val_loss: 0.1384 - val_accuracy: 0.9610 - val_precision: 0.9695 - val_recall: 0.9548\nEpoch 28/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1521 - accuracy: 0.9556 - precision: 0.9661 - recall: 0.9471 - val_loss: 0.1349 - val_accuracy: 0.9608 - val_precision: 0.9707 - val_recall: 0.9545\nEpoch 29/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1494 - accuracy: 0.9548 - precision: 0.9648 - recall: 0.9473 - val_loss: 0.1314 - val_accuracy: 0.9633 - val_precision: 0.9719 - val_recall: 0.9567\nEpoch 30/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1435 - accuracy: 0.9581 - precision: 0.9669 - recall: 0.9507 - val_loss: 0.1290 - val_accuracy: 0.9629 - val_precision: 0.9714 - val_recall: 0.9569\nEpoch 31/100\n439/439 [==============================] - 5s 12ms/step - loss: 0.1403 - accuracy: 0.9578 - precision: 0.9669 - recall: 0.9508 - val_loss: 0.1257 - val_accuracy: 0.9646 - val_precision: 0.9718 - val_recall: 0.9586\nEpoch 32/100\n" ] ], [ [ "<font size=4>Model with learning rate equals 0.00001 performed pretty well but it needed 54 epochs to achieve 97,1% accuracy (compared to 6 epochs using standard learning rate equals 0.001). 
This is because optimizer \"jumped\" too small distance searching best results, and it had to do many iterations to find the best weights.", "_____no_output_____" ], [ "### Basic Multi-layer CNN", "_____no_output_____" ] ], [ [ "model_cnn = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (2,2), \n layers.Conv2D(64, (3,3), activation=\"relu\"),\n layers.MaxPooling2D (2,2), \n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn.summary()\npredict_model(model_cnn, [es], epochs=100)", "Model: \"sequential_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 5, 5, 64) 0 \n_________________________________________________________________\nflatten_5 (Flatten) (None, 1600) 0 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 1600) 0 \n_________________________________________________________________\ndense_21 (Dense) (None, 10) 16010 \n=================================================================\nTotal params: 34,826\nTrainable params: 34,826\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 22s 28ms/step - loss: 0.7757 - accuracy: 0.7586 - precision: 0.8819 - recall: 0.6326 - val_loss: 0.0861 - val_accuracy: 0.9740 - val_precision: 0.9813 - val_recall: 0.9697\nEpoch 2/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1093 - accuracy: 0.9654 - precision: 0.9719 - recall: 0.9608 - val_loss: 0.0568 - val_accuracy: 0.9833 - val_precision: 0.9858 - val_recall: 0.9804\nEpoch 3/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0831 - accuracy: 0.9750 - precision: 0.9788 - recall: 0.9721 - val_loss: 0.0478 - val_accuracy: 0.9866 - val_precision: 0.9887 - val_recall: 0.9846\nEpoch 4/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0692 - accuracy: 0.9781 - precision: 0.9811 - recall: 0.9756 - val_loss: 0.0391 - val_accuracy: 0.9886 - val_precision: 0.9910 - val_recall: 0.9869\nEpoch 5/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0525 - accuracy: 0.9844 - precision: 0.9866 - recall: 0.9822 - val_loss: 0.0364 - val_accuracy: 0.9895 - val_precision: 0.9920 - val_recall: 0.9879\nEpoch 6/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0548 - accuracy: 0.9820 - precision: 0.9843 - recall: 0.9804 - val_loss: 0.0351 - val_accuracy: 0.9899 - val_precision: 0.9918 - val_recall: 0.9890\nEpoch 7/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0452 - accuracy: 0.9856 - precision: 0.9872 - recall: 0.9844 - val_loss: 0.0332 - val_accuracy: 0.9903 - val_precision: 0.9923 - val_recall: 0.9895\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0416 - accuracy: 0.9865 - precision: 0.9884 - recall: 0.9850 - val_loss: 0.0349 - val_accuracy: 0.9898 - val_precision: 0.9910 - val_recall: 0.9892\nEpoch 9/100\n439/439 
[==============================] - 3s 7ms/step - loss: 0.0412 - accuracy: 0.9868 - precision: 0.9884 - recall: 0.9854 - val_loss: 0.0303 - val_accuracy: 0.9918 - val_precision: 0.9928 - val_recall: 0.9908\nEpoch 10/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0366 - accuracy: 0.9881 - precision: 0.9893 - recall: 0.9870 - val_loss: 0.0307 - val_accuracy: 0.9911 - val_precision: 0.9919 - val_recall: 0.9899\nEpoch 11/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0414 - accuracy: 0.9866 - precision: 0.9877 - recall: 0.9853 - val_loss: 0.0335 - val_accuracy: 0.9911 - val_precision: 0.9918 - val_recall: 0.9906\nEpoch 12/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0345 - accuracy: 0.9890 - precision: 0.9897 - recall: 0.9881 - val_loss: 0.0316 - val_accuracy: 0.9911 - val_precision: 0.9922 - val_recall: 0.9903\nEpoch 00012: early stopping\nTest loss: 0.03780661150813103\nTest accuracy: 0.9877142906188965\nTest precision: 0.988834798336029\nTest recall: 0.9868571162223816\nTest f1 score: 0.9877142857142858\nTest AUC for digit: 0 0.9986987490590519\nTest AUC for digit: 1 0.9946251332301316\nTest AUC for digit: 2 0.9912251490762208\nTest AUC for digit: 3 0.9914630429597434\nTest AUC for digit: 4 0.9935349833293058\nTest AUC for digit: 5 0.9949048474533845\nTest AUC for digit: 6 0.9943650793650793\nTest AUC for digit: 7 0.9923824507574553\nTest AUC for digit: 8 0.9917460317460318\nTest AUC for digit: 9 0.9890579103854441\n" ] ], [ [ "<font size=4>Our first convolutional model with 2 convolutional layers was performing even better then fully connected neural networks. This model is not overfitted, because train and validation loss and accuracy are close to each other. It has only 34,826 parameters to train, so the training of such model is pretty fast. 
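For intuition, the 34,826 figure can be reproduced by hand: the first convolution has (3*3*1 + 1) * 32 = 320 parameters, the second has (3*3*32 + 1) * 64 = 18,496, and the output layer maps the flattened 5*5*64 = 1,600 features to 10 classes, i.e. (1,600 + 1) * 10 = 16,010; in total 320 + 18,496 + 16,010 = 34,826, exactly as reported by summary().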
On test set model achieves 98.7% accuracy which is great result.", "_____no_output_____" ], [ "### Different number of convolutional layers", "_____no_output_____" ] ], [ [ "model_cnn_short = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (2,2),\n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_short.summary()\npredict_model(model_cnn_short, [es], epochs=100)", "Model: \"sequential_6\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_2 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 13, 13, 32) 0 \n_________________________________________________________________\nflatten_6 (Flatten) (None, 5408) 0 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 5408) 0 \n_________________________________________________________________\ndense_22 (Dense) (None, 10) 54090 \n=================================================================\nTotal params: 54,410\nTrainable params: 54,410\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 4s 8ms/step - loss: 0.7126 - accuracy: 0.7976 - precision: 0.9174 - recall: 0.6466 - val_loss: 0.1849 - val_accuracy: 0.9453 - val_precision: 0.9596 - val_recall: 0.9362\nEpoch 2/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1896 - accuracy: 0.9442 - precision: 0.9575 - recall: 0.9324 - val_loss: 0.1285 - val_accuracy: 0.9629 - val_precision: 0.9724 - val_recall: 0.9567\nEpoch 3/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1387 - accuracy: 0.9582 - precision: 0.9662 - recall: 0.9508 - val_loss: 0.0996 - val_accuracy: 0.9719 - val_precision: 0.9783 - val_recall: 0.9678\nEpoch 4/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1196 - accuracy: 0.9648 - precision: 0.9723 - recall: 0.9583 - val_loss: 0.0876 - val_accuracy: 0.9737 - val_precision: 0.9791 - val_recall: 0.9713\nEpoch 5/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1106 - accuracy: 0.9663 - precision: 0.9729 - recall: 0.9611 - val_loss: 0.0805 - val_accuracy: 0.9765 - val_precision: 0.9801 - val_recall: 0.9739\nEpoch 6/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0965 - accuracy: 0.9702 - precision: 0.9750 - recall: 0.9659 - val_loss: 0.0732 - val_accuracy: 0.9788 - val_precision: 0.9829 - val_recall: 0.9759\nEpoch 7/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0908 - accuracy: 0.9717 - precision: 0.9763 - recall: 0.9680 - val_loss: 0.0660 - val_accuracy: 0.9805 - val_precision: 0.9827 - val_recall: 0.9771\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0875 - accuracy: 0.9728 - precision: 0.9768 - recall: 0.9696 - val_loss: 0.0633 - val_accuracy: 0.9824 - val_precision: 0.9852 - val_recall: 0.9799\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0787 - accuracy: 0.9759 - precision: 0.9793 - recall: 0.9729 - val_loss: 0.0615 - val_accuracy: 0.9817 - val_precision: 0.9856 - val_recall: 0.9789\nEpoch 10/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0759 - accuracy: 0.9759 - precision: 0.9799 - recall: 0.9731 - val_loss: 
0.0580 - val_accuracy: 0.9824 - val_precision: 0.9849 - val_recall: 0.9797\nEpoch 11/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0730 - accuracy: 0.9771 - precision: 0.9806 - recall: 0.9748 - val_loss: 0.0567 - val_accuracy: 0.9828 - val_precision: 0.9849 - val_recall: 0.9798\nEpoch 12/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0632 - accuracy: 0.9803 - precision: 0.9833 - recall: 0.9784 - val_loss: 0.0567 - val_accuracy: 0.9827 - val_precision: 0.9845 - val_recall: 0.9807\nEpoch 13/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0649 - accuracy: 0.9793 - precision: 0.9820 - recall: 0.9770 - val_loss: 0.0559 - val_accuracy: 0.9837 - val_precision: 0.9854 - val_recall: 0.9820\nEpoch 14/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0647 - accuracy: 0.9799 - precision: 0.9823 - recall: 0.9780 - val_loss: 0.0534 - val_accuracy: 0.9844 - val_precision: 0.9861 - val_recall: 0.9825\nEpoch 15/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0569 - accuracy: 0.9815 - precision: 0.9842 - recall: 0.9795 - val_loss: 0.0537 - val_accuracy: 0.9846 - val_precision: 0.9857 - val_recall: 0.9834\nEpoch 16/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0576 - accuracy: 0.9815 - precision: 0.9833 - recall: 0.9797 - val_loss: 0.0508 - val_accuracy: 0.9846 - val_precision: 0.9860 - val_recall: 0.9827\nEpoch 17/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0528 - accuracy: 0.9825 - precision: 0.9848 - recall: 0.9807 - val_loss: 0.0503 - val_accuracy: 0.9850 - val_precision: 0.9864 - val_recall: 0.9843\nEpoch 18/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0555 - accuracy: 0.9822 - precision: 0.9844 - recall: 0.9808 - val_loss: 0.0502 - val_accuracy: 0.9851 - val_precision: 0.9874 - val_recall: 0.9835\nEpoch 19/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0516 - accuracy: 0.9831 - precision: 0.9852 - recall: 0.9814 - val_loss: 0.0498 - val_accuracy: 0.9861 - val_precision: 0.9877 - val_recall: 0.9848\nEpoch 20/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0481 - accuracy: 0.9842 - precision: 0.9860 - recall: 0.9827 - val_loss: 0.0483 - val_accuracy: 0.9853 - val_precision: 0.9867 - val_recall: 0.9841\nEpoch 21/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0466 - accuracy: 0.9856 - precision: 0.9873 - recall: 0.9842 - val_loss: 0.0496 - val_accuracy: 0.9853 - val_precision: 0.9870 - val_recall: 0.9843\nEpoch 22/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0456 - accuracy: 0.9849 - precision: 0.9869 - recall: 0.9834 - val_loss: 0.0469 - val_accuracy: 0.9867 - val_precision: 0.9884 - val_recall: 0.9859\nEpoch 23/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0454 - accuracy: 0.9850 - precision: 0.9869 - recall: 0.9837 - val_loss: 0.0479 - val_accuracy: 0.9866 - val_precision: 0.9876 - val_recall: 0.9848\nEpoch 24/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0454 - accuracy: 0.9855 - precision: 0.9874 - recall: 0.9840 - val_loss: 0.0476 - val_accuracy: 0.9857 - val_precision: 0.9870 - val_recall: 0.9853\nEpoch 25/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0442 - accuracy: 0.9851 - precision: 0.9868 - recall: 0.9840 - val_loss: 0.0483 - val_accuracy: 0.9864 - val_precision: 0.9871 - val_recall: 0.9853\nEpoch 00025: early stopping\nTest loss: 
0.06184159591794014\nTest accuracy: 0.9821428656578064\nTest precision: 0.9840951561927795\nTest recall: 0.9811428785324097\nTest f1 score: 0.9821428571428571\nTest AUC for digit: 0 0.9946403781193142\nTest AUC for digit: 1 0.9928970988504665\nTest AUC for digit: 2 0.9918050336696427\nTest AUC for digit: 3 0.9915450335250312\nTest AUC for digit: 4 0.9882236580836722\nTest AUC for digit: 5 0.9855790503765007\nTest AUC for digit: 6 0.9932539682539682\nTest AUC for digit: 7 0.9851409580367276\nTest AUC for digit: 8 0.9859523809523809\nTest AUC for digit: 9 0.9908959987735204\n" ] ], [ [ "<font size=4>Next model has only 1 convolution layer which has more parameters (54,410) because of less number of pooling layers. The results are satisfying, but not as good as previous model (test accuracy equals 98,2%).", "_____no_output_____" ] ], [ [ "model_cnn_long = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D ((2,2),1), \n layers.Conv2D(64, (3,3), activation=\"relu\"),\n layers.MaxPooling2D ((2,2),1), \n layers.Conv2D(128, (3,3), activation=\"relu\"),\n layers.MaxPooling2D ((2,2),1), \n layers.Conv2D(512, (3,3), activation=\"relu\"),\n layers.MaxPooling2D ((2,2),1), \n\n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_long.summary()\npredict_model(model_cnn_long, [es], epochs=100)", "Model: \"sequential_28\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_32 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d_17 (MaxPooling (None, 25, 25, 32) 0 \n_________________________________________________________________\nconv2d_33 (Conv2D) (None, 23, 23, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_18 (MaxPooling (None, 22, 22, 64) 0 \n_________________________________________________________________\nconv2d_34 (Conv2D) (None, 20, 20, 128) 73856 \n_________________________________________________________________\nmax_pooling2d_19 (MaxPooling (None, 19, 19, 128) 0 \n_________________________________________________________________\nconv2d_35 (Conv2D) (None, 17, 17, 512) 590336 \n_________________________________________________________________\nmax_pooling2d_20 (MaxPooling (None, 16, 16, 512) 0 \n_________________________________________________________________\nflatten_28 (Flatten) (None, 131072) 0 \n_________________________________________________________________\ndense_85 (Dense) (None, 128) 16777344 \n_________________________________________________________________\ndropout_22 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_86 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 17,461,642\nTrainable params: 17,461,642\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 19s 42ms/step - loss: 0.5369 - accuracy: 0.8446 - precision: 0.9042 - recall: 0.7958 - val_loss: 0.0644 - val_accuracy: 0.9812 - val_precision: 0.9828 - val_recall: 0.9805\nEpoch 2/100\n439/439 [==============================] - 18s 40ms/step - loss: 0.0900 - accuracy: 0.9740 - precision: 0.9778 - recall: 0.9702 - val_loss: 0.0481 - val_accuracy: 
0.9846 - val_precision: 0.9861 - val_recall: 0.9833\nEpoch 3/100\n439/439 [==============================] - 18s 40ms/step - loss: 0.0727 - accuracy: 0.9790 - precision: 0.9818 - recall: 0.9766 - val_loss: 0.0360 - val_accuracy: 0.9880 - val_precision: 0.9900 - val_recall: 0.9864\nEpoch 4/100\n439/439 [==============================] - 18s 41ms/step - loss: 0.0553 - accuracy: 0.9824 - precision: 0.9848 - recall: 0.9804 - val_loss: 0.0303 - val_accuracy: 0.9915 - val_precision: 0.9922 - val_recall: 0.9909\nEpoch 5/100\n439/439 [==============================] - 18s 40ms/step - loss: 0.0370 - accuracy: 0.9888 - precision: 0.9900 - recall: 0.9874 - val_loss: 0.0300 - val_accuracy: 0.9925 - val_precision: 0.9935 - val_recall: 0.9922\nEpoch 6/100\n439/439 [==============================] - 18s 40ms/step - loss: 0.0322 - accuracy: 0.9895 - precision: 0.9908 - recall: 0.9885 - val_loss: 0.0272 - val_accuracy: 0.9941 - val_precision: 0.9942 - val_recall: 0.9935\nEpoch 7/100\n439/439 [==============================] - 18s 41ms/step - loss: 0.0295 - accuracy: 0.9911 - precision: 0.9922 - recall: 0.9904 - val_loss: 0.0321 - val_accuracy: 0.9916 - val_precision: 0.9923 - val_recall: 0.9915\nEpoch 8/100\n439/439 [==============================] - 18s 41ms/step - loss: 0.0277 - accuracy: 0.9911 - precision: 0.9924 - recall: 0.9905 - val_loss: 0.0221 - val_accuracy: 0.9932 - val_precision: 0.9941 - val_recall: 0.9931\nEpoch 9/100\n439/439 [==============================] - 18s 41ms/step - loss: 0.0235 - accuracy: 0.9925 - precision: 0.9931 - recall: 0.9920 - val_loss: 0.0280 - val_accuracy: 0.9924 - val_precision: 0.9925 - val_recall: 0.9924\nRestoring model weights from the end of the best epoch.\nEpoch 00009: early stopping\nTest loss: 0.03389483317732811\nTest accuracy: 0.9918571710586548\nTest precision: 0.992139458656311\nTest recall: 0.9917142987251282\nTest f1 score: 0.9918571428571429\nTest AUC for digit: 0 0.9990376444107582\nTest AUC for digit: 1 0.9981951123812957\nTest AUC for digit: 2 0.9929911952983329\nTest AUC for digit: 3 0.9945313397184135\nTest AUC for digit: 4 0.9947926711881807\nTest AUC for digit: 5 0.9956330341254677\nTest AUC for digit: 6 0.9968966508108128\nTest AUC for digit: 7 0.999361124421019\nTest AUC for digit: 8 0.9925846524567535\nTest AUC for digit: 9 0.990134076231021\n" ] ], [ [ "<font size=4>Next we created a neural network with 4 convolutional layers and with 17 milion parameters. The model was not overfitted. It had accuracy around 99.2% for test, train and validation model. Time needed to train this model was much higher (19s per epoch comparing to 3s per epoch in CNN that we have implemented). 
This is the best model that we have created for this dataset.", "_____no_output_____" ], [ "### Different number of filters per layer", "_____no_output_____" ] ], [ [ "model_cnn_min = keras.Sequential([\n layers.Conv2D(4, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (2,2), \n layers.Conv2D(16, (3,3), activation=\"relu\"),\n layers.MaxPooling2D (2,2), \n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_min.summary()\npredict_model(model_cnn_min, [es], epochs=100)", "Model: \"sequential_9\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_9 (Conv2D) (None, 26, 26, 4) 40 \n_________________________________________________________________\nmax_pooling2d_7 (MaxPooling2 (None, 13, 13, 4) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 11, 11, 16) 592 \n_________________________________________________________________\nmax_pooling2d_8 (MaxPooling2 (None, 5, 5, 16) 0 \n_________________________________________________________________\nflatten_9 (Flatten) (None, 400) 0 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 400) 0 \n_________________________________________________________________\ndense_28 (Dense) (None, 10) 4010 \n=================================================================\nTotal params: 4,642\nTrainable params: 4,642\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 20s 29ms/step - loss: 1.2412 - accuracy: 0.5863 - precision: 0.8270 - recall: 0.3904 - val_loss: 0.1895 - val_accuracy: 0.9508 - val_precision: 0.9644 - val_recall: 0.9309\nEpoch 2/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.2660 - accuracy: 0.9202 - precision: 0.9399 - recall: 0.9013 - val_loss: 0.1206 - val_accuracy: 0.9649 - val_precision: 0.9742 - val_recall: 0.9571\nEpoch 3/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1972 - accuracy: 0.9398 - precision: 0.9519 - recall: 0.9284 - val_loss: 0.0964 - val_accuracy: 0.9716 - val_precision: 0.9768 - val_recall: 0.9651\nEpoch 4/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1631 - accuracy: 0.9512 - precision: 0.9599 - recall: 0.9423 - val_loss: 0.0831 - val_accuracy: 0.9760 - val_precision: 0.9807 - val_recall: 0.9701\nEpoch 5/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1448 - accuracy: 0.9565 - precision: 0.9640 - recall: 0.9497 - val_loss: 0.0728 - val_accuracy: 0.9786 - val_precision: 0.9820 - val_recall: 0.9747\nEpoch 6/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1358 - accuracy: 0.9586 - precision: 0.9660 - recall: 0.9528 - val_loss: 0.0668 - val_accuracy: 0.9810 - val_precision: 0.9846 - val_recall: 0.9784\nEpoch 7/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1321 - accuracy: 0.9604 - precision: 0.9674 - recall: 0.9548 - val_loss: 0.0647 - val_accuracy: 0.9827 - val_precision: 0.9860 - val_recall: 0.9788\nEpoch 8/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1219 - accuracy: 0.9629 - precision: 0.9683 - recall: 0.9573 - val_loss: 0.0597 - val_accuracy: 0.9824 - val_precision: 0.9862 - val_recall: 0.9801\nEpoch 9/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1195 - 
accuracy: 0.9628 - precision: 0.9687 - recall: 0.9579 - val_loss: 0.0588 - val_accuracy: 0.9821 - val_precision: 0.9861 - val_recall: 0.9805\nEpoch 10/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1119 - accuracy: 0.9654 - precision: 0.9695 - recall: 0.9610 - val_loss: 0.0542 - val_accuracy: 0.9841 - val_precision: 0.9865 - val_recall: 0.9818\nEpoch 11/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1154 - accuracy: 0.9644 - precision: 0.9700 - recall: 0.9598 - val_loss: 0.0537 - val_accuracy: 0.9854 - val_precision: 0.9875 - val_recall: 0.9827\nEpoch 12/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1106 - accuracy: 0.9667 - precision: 0.9717 - recall: 0.9622 - val_loss: 0.0522 - val_accuracy: 0.9850 - val_precision: 0.9875 - val_recall: 0.9827\nEpoch 13/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1048 - accuracy: 0.9677 - precision: 0.9727 - recall: 0.9641 - val_loss: 0.0522 - val_accuracy: 0.9850 - val_precision: 0.9868 - val_recall: 0.9830\nEpoch 14/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1069 - accuracy: 0.9672 - precision: 0.9715 - recall: 0.9638 - val_loss: 0.0490 - val_accuracy: 0.9857 - val_precision: 0.9877 - val_recall: 0.9843\nEpoch 15/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1051 - accuracy: 0.9676 - precision: 0.9712 - recall: 0.9637 - val_loss: 0.0488 - val_accuracy: 0.9861 - val_precision: 0.9875 - val_recall: 0.9835\nEpoch 16/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.1029 - accuracy: 0.9683 - precision: 0.9719 - recall: 0.9650 - val_loss: 0.0472 - val_accuracy: 0.9866 - val_precision: 0.9884 - val_recall: 0.9851\nEpoch 17/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0990 - accuracy: 0.9694 - precision: 0.9730 - recall: 0.9657 - val_loss: 0.0461 - val_accuracy: 0.9863 - val_precision: 0.9883 - val_recall: 0.9847\nEpoch 18/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0960 - accuracy: 0.9705 - precision: 0.9743 - recall: 0.9670 - val_loss: 0.0482 - val_accuracy: 0.9864 - val_precision: 0.9881 - val_recall: 0.9838\nEpoch 19/100\n439/439 [==============================] - 3s 6ms/step - loss: 0.0978 - accuracy: 0.9701 - precision: 0.9744 - recall: 0.9665 - val_loss: 0.0482 - val_accuracy: 0.9859 - val_precision: 0.9877 - val_recall: 0.9840\nEpoch 00019: early stopping\nTest loss: 0.06978869438171387\nTest accuracy: 0.9787142872810364\nTest precision: 0.9813513159751892\nTest recall: 0.9772857427597046\nTest f1 score: 0.9787142857142858\nTest AUC for digit: 0 0.9965322040694353\nTest AUC for digit: 1 0.9950131545729289\nTest AUC for digit: 2 0.9902744281128235\nTest AUC for digit: 3 0.9815570919354413\nTest AUC for digit: 4 0.9918910464386338\nTest AUC for digit: 5 0.9894167367175205\nTest AUC for digit: 6 0.9894444444444445\nTest AUC for digit: 7 0.9815003368863819\nTest AUC for digit: 8 0.9804761904761905\nTest AUC for digit: 9 0.9860031281752693\n" ] ], [ [ "<font size=4>Next we decided to check how number of filters impact to performance of model. Reducing number of filter in convolutional layers made our model worse than basic model. Accuracy has fallen to 97.8%, because this model was too simple to explain complexity of our data. 
This model is underfitted.", "_____no_output_____" ] ], [ [ "model_cnn_max = keras.Sequential([\n layers.Conv2D(128, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (2,2), \n layers.Conv2D(512, (3,3), activation=\"relu\"),\n layers.MaxPooling2D (2,2), \n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_max.summary()\npredict_model(model_cnn_max, [es], epochs=100)", "Model: \"sequential_10\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_11 (Conv2D) (None, 26, 26, 128) 1280 \n_________________________________________________________________\nmax_pooling2d_9 (MaxPooling2 (None, 13, 13, 128) 0 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 11, 11, 512) 590336 \n_________________________________________________________________\nmax_pooling2d_10 (MaxPooling (None, 5, 5, 512) 0 \n_________________________________________________________________\nflatten_10 (Flatten) (None, 12800) 0 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 12800) 0 \n_________________________________________________________________\ndense_29 (Dense) (None, 10) 128010 \n=================================================================\nTotal params: 719,626\nTrainable params: 719,626\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 27s 44ms/step - loss: 0.3990 - accuracy: 0.8824 - precision: 0.9333 - recall: 0.8202 - val_loss: 0.0549 - val_accuracy: 0.9824 - val_precision: 0.9854 - val_recall: 0.9804\nEpoch 2/100\n439/439 [==============================] - 9s 19ms/step - loss: 0.0521 - accuracy: 0.9837 - precision: 0.9856 - recall: 0.9818 - val_loss: 0.0435 - val_accuracy: 0.9857 - val_precision: 0.9870 - val_recall: 0.9844\nEpoch 3/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0429 - accuracy: 0.9862 - precision: 0.9876 - recall: 0.9849 - val_loss: 0.0343 - val_accuracy: 0.9896 - val_precision: 0.9902 - val_recall: 0.9885\nEpoch 4/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0320 - accuracy: 0.9897 - precision: 0.9904 - recall: 0.9890 - val_loss: 0.0361 - val_accuracy: 0.9899 - val_precision: 0.9909 - val_recall: 0.9895\nEpoch 5/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0249 - accuracy: 0.9921 - precision: 0.9929 - recall: 0.9916 - val_loss: 0.0323 - val_accuracy: 0.9900 - val_precision: 0.9913 - val_recall: 0.9890\nEpoch 6/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0233 - accuracy: 0.9925 - precision: 0.9928 - recall: 0.9920 - val_loss: 0.0347 - val_accuracy: 0.9899 - val_precision: 0.9903 - val_recall: 0.9896\nEpoch 7/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0185 - accuracy: 0.9941 - precision: 0.9945 - recall: 0.9938 - val_loss: 0.0343 - val_accuracy: 0.9915 - val_precision: 0.9921 - val_recall: 0.9911\nEpoch 8/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0156 - accuracy: 0.9946 - precision: 0.9950 - recall: 0.9943 - val_loss: 0.0346 - val_accuracy: 0.9906 - val_precision: 0.9908 - val_recall: 0.9903\nEpoch 9/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0137 - accuracy: 0.9956 - precision: 0.9960 - recall: 0.9954 - val_loss: 0.0362 - 
val_accuracy: 0.9899 - val_precision: 0.9906 - val_recall: 0.9899\nEpoch 10/100\n439/439 [==============================] - 8s 19ms/step - loss: 0.0112 - accuracy: 0.9961 - precision: 0.9966 - recall: 0.9959 - val_loss: 0.0365 - val_accuracy: 0.9903 - val_precision: 0.9909 - val_recall: 0.9902\nEpoch 00010: early stopping\nTest loss: 0.04324812442064285\nTest accuracy: 0.9902856945991516\nTest precision: 0.9904244542121887\nTest recall: 0.9900000095367432\nTest f1 score: 0.9902857142857143\nTest AUC for digit: 0 0.9962574850299402\nTest AUC for digit: 1 0.9940907921236538\nTest AUC for digit: 2 0.9984028163786178\nTest AUC for digit: 3 0.993960028357141\nTest AUC for digit: 4 0.9914325054702\nTest AUC for digit: 5 0.9978038687780799\nTest AUC for digit: 6 0.9957936507936508\nTest AUC for digit: 7 0.9913140793939336\nTest AUC for digit: 8 0.9926190476190476\nTest AUC for digit: 9 0.9948151881227197\n" ] ], [ [ "<font size=4>Next we increased number of filters. This caused a raise of number of parameters to over 700 thousands but model did not perform better than basic model. Test accuracy was 99%, which is slightly less than basic model's accuracy. It means that we should not use such high number of filters because we do not need them. This model also seems to be overfitted.", "_____no_output_____" ], [ "### Different size and type of pooling layers", "_____no_output_____" ] ], [ [ "model_cnn_pool5 = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (5,3), \n layers.Conv2D(64, (3,3), activation=\"relu\"),\n layers.MaxPooling2D (5,3), \n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_pool5.summary()\npredict_model(model_cnn_pool5, [es], epochs=100)", "Model: \"sequential_17\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_13 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d_11 (MaxPooling (None, 8, 8, 32) 0 \n_________________________________________________________________\nconv2d_14 (Conv2D) (None, 6, 6, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_12 (MaxPooling (None, 1, 1, 64) 0 \n_________________________________________________________________\nflatten_17 (Flatten) (None, 64) 0 \n_________________________________________________________________\ndropout_16 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_54 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 19,466\nTrainable params: 19,466\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 17s 25ms/step - loss: 1.6386 - accuracy: 0.4443 - precision: 0.7621 - recall: 0.1895 - val_loss: 0.3402 - val_accuracy: 0.9205 - val_precision: 0.9659 - val_recall: 0.8592\nEpoch 2/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.5372 - accuracy: 0.8335 - precision: 0.8979 - recall: 0.7583 - val_loss: 0.2031 - val_accuracy: 0.9482 - val_precision: 0.9721 - val_recall: 0.9242\nEpoch 3/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.3899 - accuracy: 0.8815 - precision: 0.9224 - recall: 0.8415 - val_loss: 0.1599 - val_accuracy: 0.9600 - val_precision: 0.9747 
- val_recall: 0.9439\nEpoch 4/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.3313 - accuracy: 0.8988 - precision: 0.9294 - recall: 0.8673 - val_loss: 0.1329 - val_accuracy: 0.9629 - val_precision: 0.9762 - val_recall: 0.9489\nEpoch 5/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2954 - accuracy: 0.9092 - precision: 0.9359 - recall: 0.8834 - val_loss: 0.1220 - val_accuracy: 0.9681 - val_precision: 0.9795 - val_recall: 0.9576\nEpoch 6/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.2813 - accuracy: 0.9143 - precision: 0.9392 - recall: 0.8897 - val_loss: 0.1032 - val_accuracy: 0.9701 - val_precision: 0.9799 - val_recall: 0.9633\nEpoch 7/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.2496 - accuracy: 0.9239 - precision: 0.9440 - recall: 0.9045 - val_loss: 0.1051 - val_accuracy: 0.9704 - val_precision: 0.9797 - val_recall: 0.9593\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2431 - accuracy: 0.9248 - precision: 0.9447 - recall: 0.9058 - val_loss: 0.0936 - val_accuracy: 0.9706 - val_precision: 0.9798 - val_recall: 0.9641\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2384 - accuracy: 0.9260 - precision: 0.9459 - recall: 0.9090 - val_loss: 0.0853 - val_accuracy: 0.9766 - val_precision: 0.9830 - val_recall: 0.9696\nEpoch 10/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2130 - accuracy: 0.9333 - precision: 0.9492 - recall: 0.9185 - val_loss: 0.0826 - val_accuracy: 0.9762 - val_precision: 0.9835 - val_recall: 0.9711\nEpoch 11/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2164 - accuracy: 0.9316 - precision: 0.9492 - recall: 0.9175 - val_loss: 0.0828 - val_accuracy: 0.9746 - val_precision: 0.9807 - val_recall: 0.9701\nEpoch 12/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2055 - accuracy: 0.9370 - precision: 0.9515 - recall: 0.9241 - val_loss: 0.0853 - val_accuracy: 0.9730 - val_precision: 0.9796 - val_recall: 0.9698\nEpoch 00012: early stopping\nTest loss: 0.09769929945468903\nTest accuracy: 0.9715714454650879\nTest precision: 0.9770860075950623\nTest recall: 0.9685714244842529\nTest f1 score: 0.9715714285714285\nTest AUC for digit: 0 0.9948361331663899\nTest AUC for digit: 1 0.9941565446942954\nTest AUC for digit: 2 0.9841899519327072\nTest AUC for digit: 3 0.9719724776720643\nTest AUC for digit: 4 0.9860265364292893\nTest AUC for digit: 5 0.9800909396406366\nTest AUC for digit: 6 0.9928571428571429\nTest AUC for digit: 7 0.9823115528040143\nTest AUC for digit: 8 0.9796031746031746\nTest AUC for digit: 9 0.9761439939197929\n" ] ], [ [ "<font size=4>Next, we checked different size of pooling layers. We decided to create a MaxPooling layers with size equals (5,5) and stride equals 3. It means that we take a square of values with size 5x5 then we look for max value, we write it in the middle of square and than we \"move\" 3 numbers in right or down direction. As we can see, the accuracy is worse than basic model, because we lose too much information in MaxPooling layers. 
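To make that size reduction concrete, here is a minimal sketch (not part of the original notebook; it assumes TensorFlow 2.x and reuses the feature-map shapes reported in the model summary above) showing how a 5x5 window with stride 3 shrinks the feature maps:

```python
# Minimal sketch (assumes TensorFlow 2.x): each output value is the maximum of a 5x5
# patch, and the window then jumps 3 pixels, so most of the spatial detail is discarded.
import tensorflow as tf

pool = tf.keras.layers.MaxPooling2D(pool_size=5, strides=3)

x = tf.random.normal((1, 26, 26, 32))   # shape of the first Conv2D output in the summary above
print(pool(x).shape)                    # (1, 8, 8, 32): floor((26 - 5) / 3) + 1 = 8

y = tf.random.normal((1, 6, 6, 64))     # shape of the second Conv2D output
print(pool(y).shape)                    # (1, 1, 1, 64): only a single 5x5 window fits
```

These are the same 26→8 and 6→1 reductions visible in the layer summary, which is why so much information is lost before the classifier.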
The plots also shows that this model is underfitted.", "_____no_output_____" ] ], [ [ "model_cnn_avg = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.AveragePooling2D (3,3), \n layers.Conv2D(64, (3,3), activation=\"relu\"),\n layers.AveragePooling2D (3,3), \n\n layers.Flatten(),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_avg.summary()\npredict_model(model_cnn_avg, [es], epochs=100)", "Model: \"sequential_18\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_15 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\naverage_pooling2d_2 (Average (None, 8, 8, 32) 0 \n_________________________________________________________________\nconv2d_16 (Conv2D) (None, 6, 6, 64) 18496 \n_________________________________________________________________\naverage_pooling2d_3 (Average (None, 2, 2, 64) 0 \n_________________________________________________________________\nflatten_18 (Flatten) (None, 256) 0 \n_________________________________________________________________\ndropout_17 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_55 (Dense) (None, 10) 2570 \n=================================================================\nTotal params: 21,386\nTrainable params: 21,386\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 4s 8ms/step - loss: 1.4358 - accuracy: 0.5197 - precision: 0.7783 - recall: 0.3063 - val_loss: 0.2705 - val_accuracy: 0.9248 - val_precision: 0.9531 - val_recall: 0.8922\nEpoch 2/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.3783 - accuracy: 0.8837 - precision: 0.9196 - recall: 0.8476 - val_loss: 0.1960 - val_accuracy: 0.9437 - val_precision: 0.9590 - val_recall: 0.9251\nEpoch 3/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2821 - accuracy: 0.9156 - precision: 0.9367 - recall: 0.8952 - val_loss: 0.1518 - val_accuracy: 0.9548 - val_precision: 0.9683 - val_recall: 0.9423\nEpoch 4/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2438 - accuracy: 0.9272 - precision: 0.9439 - recall: 0.9095 - val_loss: 0.1290 - val_accuracy: 0.9618 - val_precision: 0.9702 - val_recall: 0.9535\nEpoch 5/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2236 - accuracy: 0.9325 - precision: 0.9481 - recall: 0.9185 - val_loss: 0.1161 - val_accuracy: 0.9654 - val_precision: 0.9729 - val_recall: 0.9576\nEpoch 6/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.2054 - accuracy: 0.9364 - precision: 0.9496 - recall: 0.9246 - val_loss: 0.1074 - val_accuracy: 0.9674 - val_precision: 0.9733 - val_recall: 0.9622\nEpoch 7/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1861 - accuracy: 0.9435 - precision: 0.9551 - recall: 0.9341 - val_loss: 0.1005 - val_accuracy: 0.9729 - val_precision: 0.9769 - val_recall: 0.9655\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1737 - accuracy: 0.9474 - precision: 0.9569 - recall: 0.9373 - val_loss: 0.0930 - val_accuracy: 0.9732 - val_precision: 0.9797 - val_recall: 0.9664\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1644 - accuracy: 0.9495 - precision: 0.9593 - recall: 0.9412 - val_loss: 0.0846 
- val_accuracy: 0.9769 - val_precision: 0.9813 - val_recall: 0.9713\nEpoch 10/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1554 - accuracy: 0.9541 - precision: 0.9624 - recall: 0.9459 - val_loss: 0.0813 - val_accuracy: 0.9769 - val_precision: 0.9796 - val_recall: 0.9720\nEpoch 11/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1508 - accuracy: 0.9546 - precision: 0.9632 - recall: 0.9462 - val_loss: 0.0804 - val_accuracy: 0.9788 - val_precision: 0.9818 - val_recall: 0.9733\nEpoch 12/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1422 - accuracy: 0.9573 - precision: 0.9656 - recall: 0.9500 - val_loss: 0.0769 - val_accuracy: 0.9784 - val_precision: 0.9820 - val_recall: 0.9747\nEpoch 13/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1312 - accuracy: 0.9601 - precision: 0.9681 - recall: 0.9534 - val_loss: 0.0703 - val_accuracy: 0.9805 - val_precision: 0.9837 - val_recall: 0.9772\nEpoch 14/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1299 - accuracy: 0.9603 - precision: 0.9674 - recall: 0.9546 - val_loss: 0.0670 - val_accuracy: 0.9812 - val_precision: 0.9850 - val_recall: 0.9778\nEpoch 15/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1196 - accuracy: 0.9636 - precision: 0.9695 - recall: 0.9576 - val_loss: 0.0651 - val_accuracy: 0.9814 - val_precision: 0.9845 - val_recall: 0.9785\nEpoch 16/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1184 - accuracy: 0.9636 - precision: 0.9692 - recall: 0.9584 - val_loss: 0.0651 - val_accuracy: 0.9821 - val_precision: 0.9855 - val_recall: 0.9779\nEpoch 17/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1094 - accuracy: 0.9654 - precision: 0.9711 - recall: 0.9599 - val_loss: 0.0589 - val_accuracy: 0.9834 - val_precision: 0.9855 - val_recall: 0.9808\nEpoch 18/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1074 - accuracy: 0.9668 - precision: 0.9724 - recall: 0.9627 - val_loss: 0.0578 - val_accuracy: 0.9837 - val_precision: 0.9866 - val_recall: 0.9811\nEpoch 19/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1010 - accuracy: 0.9683 - precision: 0.9735 - recall: 0.9638 - val_loss: 0.0559 - val_accuracy: 0.9843 - val_precision: 0.9865 - val_recall: 0.9817\nEpoch 20/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0975 - accuracy: 0.9704 - precision: 0.9751 - recall: 0.9660 - val_loss: 0.0564 - val_accuracy: 0.9833 - val_precision: 0.9867 - val_recall: 0.9817\nEpoch 21/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0961 - accuracy: 0.9713 - precision: 0.9754 - recall: 0.9670 - val_loss: 0.0562 - val_accuracy: 0.9846 - val_precision: 0.9865 - val_recall: 0.9821\nEpoch 22/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0930 - accuracy: 0.9708 - precision: 0.9752 - recall: 0.9669 - val_loss: 0.0506 - val_accuracy: 0.9856 - val_precision: 0.9887 - val_recall: 0.9843\nEpoch 23/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0942 - accuracy: 0.9710 - precision: 0.9755 - recall: 0.9667 - val_loss: 0.0512 - val_accuracy: 0.9843 - val_precision: 0.9869 - val_recall: 0.9821\nEpoch 24/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0892 - accuracy: 0.9731 - precision: 0.9770 - recall: 0.9693 - val_loss: 0.0492 - val_accuracy: 0.9863 - val_precision: 0.9884 - val_recall: 0.9846\nEpoch 25/100\n439/439 [==============================] - 3s 
7ms/step - loss: 0.0863 - accuracy: 0.9739 - precision: 0.9774 - recall: 0.9702 - val_loss: 0.0492 - val_accuracy: 0.9861 - val_precision: 0.9881 - val_recall: 0.9844\nEpoch 26/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0864 - accuracy: 0.9734 - precision: 0.9771 - recall: 0.9697 - val_loss: 0.0491 - val_accuracy: 0.9867 - val_precision: 0.9886 - val_recall: 0.9850\nEpoch 27/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0884 - accuracy: 0.9740 - precision: 0.9777 - recall: 0.9707 - val_loss: 0.0478 - val_accuracy: 0.9857 - val_precision: 0.9877 - val_recall: 0.9835\nEpoch 28/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0799 - accuracy: 0.9758 - precision: 0.9791 - recall: 0.9725 - val_loss: 0.0442 - val_accuracy: 0.9876 - val_precision: 0.9894 - val_recall: 0.9863\nEpoch 29/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0820 - accuracy: 0.9755 - precision: 0.9789 - recall: 0.9724 - val_loss: 0.0449 - val_accuracy: 0.9867 - val_precision: 0.9891 - val_recall: 0.9856\nEpoch 30/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0779 - accuracy: 0.9748 - precision: 0.9784 - recall: 0.9718 - val_loss: 0.0441 - val_accuracy: 0.9867 - val_precision: 0.9887 - val_recall: 0.9848\nEpoch 31/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0730 - accuracy: 0.9779 - precision: 0.9811 - recall: 0.9751 - val_loss: 0.0446 - val_accuracy: 0.9859 - val_precision: 0.9877 - val_recall: 0.9844\n" ] ], [ [ "<font size=4>Next, we changed the MaxPooling layers to AveragePooling layers. The difference between these two layers is that AveragePooling sums the values in the window and divides by the number of values in it, rather than keeping only the maximum. The results are worse than the basic model because MaxPooling, by its nature, suits images with a black background: it keeps the most white (brightest) grey-scale value in each window. 
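A tiny numerical example makes the difference visible (a sketch added here for illustration, assuming TensorFlow 2.x; it is not part of the original notebook):

```python
# Sketch (assumes TensorFlow 2.x): a 2x2 patch with one bright pixel on a black background.
import tensorflow as tf

patch = tf.constant([[0.0, 0.0],
                     [0.0, 1.0]], dtype=tf.float32)
x = tf.reshape(patch, (1, 2, 2, 1))      # add batch and channel dimensions for the pooling layers

max_out = tf.keras.layers.MaxPooling2D(pool_size=2)(x)
avg_out = tf.keras.layers.AveragePooling2D(pool_size=2)(x)

print(float(tf.squeeze(max_out)))        # 1.0  -> max pooling keeps the bright stroke
print(float(tf.squeeze(avg_out)))        # 0.25 -> average pooling dilutes it with the background
```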
", "_____no_output_____" ], [ "### Different number of full conected layers", "_____no_output_____" ] ], [ [ "model_cnn_fc = keras.Sequential([\n layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(28,28,1)),\n layers.MaxPooling2D (2,2), \n layers.Conv2D(64, (3,3), activation=\"relu\"),\n layers.MaxPooling2D (2,2), \n\n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dense(32, activation=\"relu\"),\n layers.Dropout(.5),\n layers.Dense(10, activation=\"softmax\")\n])\nmodel_cnn_fc.summary()\npredict_model(model_cnn_fc, [es], epochs=100)", "Model: \"sequential_19\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_17 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d_13 (MaxPooling (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_18 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_14 (MaxPooling (None, 5, 5, 64) 0 \n_________________________________________________________________\nflatten_19 (Flatten) (None, 1600) 0 \n_________________________________________________________________\ndense_56 (Dense) (None, 128) 204928 \n_________________________________________________________________\ndense_57 (Dense) (None, 32) 4128 \n_________________________________________________________________\ndropout_18 (Dropout) (None, 32) 0 \n_________________________________________________________________\ndense_58 (Dense) (None, 10) 330 \n=================================================================\nTotal params: 228,202\nTrainable params: 228,202\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 5s 9ms/step - loss: 0.9434 - accuracy: 0.6736 - precision: 0.8816 - recall: 0.5434 - val_loss: 0.0790 - val_accuracy: 0.9759 - val_precision: 0.9819 - val_recall: 0.9717\nEpoch 2/100\n439/439 [==============================] - 4s 8ms/step - loss: 0.2156 - accuracy: 0.9378 - precision: 0.9588 - recall: 0.9172 - val_loss: 0.0600 - val_accuracy: 0.9835 - val_precision: 0.9864 - val_recall: 0.9815\nEpoch 3/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.1334 - accuracy: 0.9598 - precision: 0.9733 - recall: 0.9482 - val_loss: 0.0479 - val_accuracy: 0.9863 - val_precision: 0.9893 - val_recall: 0.9843\nEpoch 4/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0965 - accuracy: 0.9712 - precision: 0.9809 - recall: 0.9635 - val_loss: 0.0405 - val_accuracy: 0.9895 - val_precision: 0.9916 - val_recall: 0.9879\nEpoch 5/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0784 - accuracy: 0.9771 - precision: 0.9837 - recall: 0.9699 - val_loss: 0.0548 - val_accuracy: 0.9866 - val_precision: 0.9877 - val_recall: 0.9853\nEpoch 6/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0962 - accuracy: 0.9722 - precision: 0.9800 - recall: 0.9644 - val_loss: 0.0438 - val_accuracy: 0.9895 - val_precision: 0.9899 - val_recall: 0.9889\nEpoch 7/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0635 - accuracy: 0.9800 - precision: 0.9857 - recall: 0.9746 - val_loss: 0.0368 - val_accuracy: 0.9903 - val_precision: 0.9909 - val_recall: 0.9895\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 
0.0566 - accuracy: 0.9808 - precision: 0.9863 - recall: 0.9768 - val_loss: 0.0331 - val_accuracy: 0.9905 - val_precision: 0.9918 - val_recall: 0.9893\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0508 - accuracy: 0.9838 - precision: 0.9878 - recall: 0.9795 - val_loss: 0.0382 - val_accuracy: 0.9916 - val_precision: 0.9922 - val_recall: 0.9909\nEpoch 10/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0570 - accuracy: 0.9811 - precision: 0.9860 - recall: 0.9768 - val_loss: 0.0343 - val_accuracy: 0.9921 - val_precision: 0.9931 - val_recall: 0.9915\nEpoch 11/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0483 - accuracy: 0.9835 - precision: 0.9882 - recall: 0.9795 - val_loss: 0.0376 - val_accuracy: 0.9925 - val_precision: 0.9932 - val_recall: 0.9921\nEpoch 12/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0452 - accuracy: 0.9859 - precision: 0.9890 - recall: 0.9819 - val_loss: 0.0450 - val_accuracy: 0.9902 - val_precision: 0.9910 - val_recall: 0.9896\nEpoch 13/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0371 - accuracy: 0.9883 - precision: 0.9905 - recall: 0.9848 - val_loss: 0.0395 - val_accuracy: 0.9918 - val_precision: 0.9922 - val_recall: 0.9909\nEpoch 14/100\n439/439 [==============================] - 3s 8ms/step - loss: 0.0443 - accuracy: 0.9852 - precision: 0.9887 - recall: 0.9817 - val_loss: 0.0439 - val_accuracy: 0.9913 - val_precision: 0.9918 - val_recall: 0.9908\nEpoch 00014: early stopping\nTest loss: 0.06664026528596878\nTest accuracy: 0.9888571500778198\nTest precision: 0.9892780780792236\nTest recall: 0.9885714054107666\nTest f1 score: 0.9888571428571429\nTest AUC for digit: 0 0.9990146050287297\nTest AUC for digit: 1 0.9951085363883655\nTest AUC for digit: 2 0.9946951426069947\nTest AUC for digit: 3 0.9941206967376037\nTest AUC for digit: 4 0.9936140097241216\nTest AUC for digit: 5 0.9941211483938233\nTest AUC for digit: 6 0.9940476190476191\nTest AUC for digit: 7 0.9912349278979704\nTest AUC for digit: 8 0.9904761904761904\nTest AUC for digit: 9 0.9916870128535711\n" ] ], [ [ "## The performance of a published network (LeNet5, VGG, Yolo, etc) for recognizing MNIST Digits", "_____no_output_____" ], [ "<font size=4>We decided to implement the architecture of LeNet5 network. The LeNet-5 architecture consists of two sets of convolutional and average pooling layers, followed by a flattening convolutional layer, then two fully-connected layers and finally a softmax classifier.", "_____no_output_____" ], [ "<img src=\"images/lenet5.png\" >", "_____no_output_____" ], [ "### LeNet5", "_____no_output_____" ], [ "<font size=4>This is an implementation of LeNet5 (slightly different, because input shape is 28x28 and in original version was 32x32). Despite of its age the model is pretty accurate (with accuracy 98.9%). 
This is close to our best models, and it does not have too big number of parameters (only 60,074).", "_____no_output_____" ] ], [ [ "lenet5 = keras.Sequential([\n layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(28,28,1)),\n layers.AveragePooling2D(),\n \n layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),\n layers.AveragePooling2D(),\n \n layers.Flatten(),\n \n layers.Dense(units=120, activation='relu'),\n layers.Dense(units=84, activation='relu'),\n layers.Dense(units=10, activation='softmax')\n])\n\nlenet5.summary()\npredict_model(lenet5, [es], epochs=100)", "Model: \"sequential_27\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_30 (Conv2D) (None, 26, 26, 6) 60 \n_________________________________________________________________\naverage_pooling2d_12 (Averag (None, 13, 13, 6) 0 \n_________________________________________________________________\nconv2d_31 (Conv2D) (None, 11, 11, 16) 880 \n_________________________________________________________________\naverage_pooling2d_13 (Averag (None, 5, 5, 16) 0 \n_________________________________________________________________\nflatten_27 (Flatten) (None, 400) 0 \n_________________________________________________________________\ndense_82 (Dense) (None, 120) 48120 \n_________________________________________________________________\ndense_83 (Dense) (None, 84) 10164 \n_________________________________________________________________\ndense_84 (Dense) (None, 10) 850 \n=================================================================\nTotal params: 60,074\nTrainable params: 60,074\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/100\n439/439 [==============================] - 4s 7ms/step - loss: 0.8082 - accuracy: 0.7743 - precision: 0.8916 - recall: 0.6138 - val_loss: 0.1701 - val_accuracy: 0.9485 - val_precision: 0.9569 - val_recall: 0.9416\nEpoch 2/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.1352 - accuracy: 0.9603 - precision: 0.9669 - recall: 0.9535 - val_loss: 0.0855 - val_accuracy: 0.9750 - val_precision: 0.9798 - val_recall: 0.9723\nEpoch 3/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0792 - accuracy: 0.9762 - precision: 0.9797 - recall: 0.9734 - val_loss: 0.0775 - val_accuracy: 0.9766 - val_precision: 0.9808 - val_recall: 0.9743\nEpoch 4/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0660 - accuracy: 0.9803 - precision: 0.9830 - recall: 0.9780 - val_loss: 0.0585 - val_accuracy: 0.9827 - val_precision: 0.9855 - val_recall: 0.9812\nEpoch 5/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0530 - accuracy: 0.9835 - precision: 0.9857 - recall: 0.9815 - val_loss: 0.0560 - val_accuracy: 0.9837 - val_precision: 0.9865 - val_recall: 0.9815\nEpoch 6/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0451 - accuracy: 0.9858 - precision: 0.9874 - recall: 0.9846 - val_loss: 0.0515 - val_accuracy: 0.9848 - val_precision: 0.9877 - val_recall: 0.9838\nEpoch 7/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0382 - accuracy: 0.9878 - precision: 0.9892 - recall: 0.9867 - val_loss: 0.0510 - val_accuracy: 0.9838 - val_precision: 0.9872 - val_recall: 0.9825\nEpoch 8/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0330 - accuracy: 0.9898 - precision: 0.9910 - recall: 0.9890 - val_loss: 
0.0480 - val_accuracy: 0.9869 - val_precision: 0.9881 - val_recall: 0.9848\nEpoch 9/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0357 - accuracy: 0.9880 - precision: 0.9894 - recall: 0.9870 - val_loss: 0.0450 - val_accuracy: 0.9864 - val_precision: 0.9880 - val_recall: 0.9848\nEpoch 10/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0265 - accuracy: 0.9913 - precision: 0.9920 - recall: 0.9902 - val_loss: 0.0379 - val_accuracy: 0.9889 - val_precision: 0.9899 - val_recall: 0.9879\nEpoch 11/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0245 - accuracy: 0.9916 - precision: 0.9922 - recall: 0.9911 - val_loss: 0.0468 - val_accuracy: 0.9851 - val_precision: 0.9865 - val_recall: 0.9843\nEpoch 12/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0253 - accuracy: 0.9920 - precision: 0.9927 - recall: 0.9915 - val_loss: 0.0380 - val_accuracy: 0.9893 - val_precision: 0.9905 - val_recall: 0.9883\nEpoch 13/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0206 - accuracy: 0.9935 - precision: 0.9939 - recall: 0.9928 - val_loss: 0.0350 - val_accuracy: 0.9903 - val_precision: 0.9913 - val_recall: 0.9887\nEpoch 14/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0168 - accuracy: 0.9947 - precision: 0.9953 - recall: 0.9943 - val_loss: 0.0356 - val_accuracy: 0.9899 - val_precision: 0.9909 - val_recall: 0.9893\nEpoch 15/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0133 - accuracy: 0.9955 - precision: 0.9959 - recall: 0.9952 - val_loss: 0.0419 - val_accuracy: 0.9893 - val_precision: 0.9900 - val_recall: 0.9883\nEpoch 16/100\n439/439 [==============================] - 3s 7ms/step - loss: 0.0129 - accuracy: 0.9959 - precision: 0.9962 - recall: 0.9957 - val_loss: 0.0420 - val_accuracy: 0.9874 - val_precision: 0.9884 - val_recall: 0.9870\nRestoring model weights from the end of the best epoch.\nEpoch 00016: early stopping\nTest loss: 0.039734747260808945\nTest accuracy: 0.9887142777442932\nTest precision: 0.9896981120109558\nTest recall: 0.9881428480148315\nTest f1 score: 0.9887142857142858\nTest AUC for digit: 0 0.9967844920645857\nTest AUC for digit: 1 0.9953059136050564\nTest AUC for digit: 2 0.9949662450022941\nTest AUC for digit: 3 0.9916327889937758\nTest AUC for digit: 4 0.9944156815297652\nTest AUC for digit: 5 0.9953975922796036\nTest AUC for digit: 6 0.9949879467017794\nTest AUC for digit: 7 0.9939484045292735\nTest AUC for digit: 8 0.986462230229477\nTest AUC for digit: 9 0.9932852101356561\n" ] ], [ [ "### The best network ", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix\n\ny_pred = model_cnn_long.predict(x_test)\ny_pred1 = list(np.argmax(y_pred, axis=1))\ny_test1 = list(np.argmax(y_test, axis = 1))\n\nconfusion_matrix = confusion_matrix(y_test1, y_pred1)\n\nprint(confusion_matrix)", "[[654 0 0 0 0 0 0 0 0 0]\n [ 0 765 0 0 0 0 0 1 0 1]\n [ 1 1 697 1 0 0 0 3 1 0]\n [ 0 0 0 707 0 0 0 2 0 0]\n [ 0 0 0 0 670 0 1 0 0 2]\n [ 1 1 0 0 0 648 0 0 0 2]\n [ 1 0 0 1 0 1 697 0 0 0]\n [ 0 0 0 0 0 1 0 745 0 0]\n [ 0 0 0 1 0 0 0 0 683 2]\n [ 0 1 0 0 3 1 0 1 0 703]]\n" ] ], [ [ "<font size=4>We chose model with best accuracy. It was a model with 4 convolutional layers and this is confusion matrix of this model.", "_____no_output_____" ] ] ]
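As a follow-up to the confusion matrix above, per-digit recall can be read straight off its diagonal. The snippet below is a small sketch added for illustration, not part of the original notebook; it assumes the `confusion_matrix` array printed in the last code cell is still in scope.

```python
# Sketch: rows of the matrix are true digits, columns are predictions (sklearn convention),
# so diagonal / row-sum gives the recall of each digit.
import numpy as np

cm = np.asarray(confusion_matrix)             # the array printed above
recall_per_digit = cm.diagonal() / cm.sum(axis=1)
for digit, recall in enumerate(recall_per_digit):
    print(f"digit {digit}: recall = {recall:.4f}")
```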
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d058a5e95e89a2775ae3b25cd2a933ba9119fc9c
112,650
ipynb
Jupyter Notebook
predicting_movie_reviews_with_bert_on_tf_hub.ipynb
bedman3/bert
e82db0480df7dfcc25efea9570a717d335c8fd3c
[ "Apache-2.0" ]
null
null
null
predicting_movie_reviews_with_bert_on_tf_hub.ipynb
bedman3/bert
e82db0480df7dfcc25efea9570a717d335c8fd3c
[ "Apache-2.0" ]
null
null
null
predicting_movie_reviews_with_bert_on_tf_hub.ipynb
bedman3/bert
e82db0480df7dfcc25efea9570a717d335c8fd3c
[ "Apache-2.0" ]
null
null
null
86.321839
9,471
0.678837
[ [ [ "# Copyright 2019 Google Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "#Predicting Movie Review Sentiment with BERT on TF Hub", "_____no_output_____" ], [ "If you’ve been following Natural Language Processing over the past year, you’ve probably heard of BERT: Bidirectional Encoder Representations from Transformers. It’s a neural network architecture designed by Google researchers that’s totally transformed what’s state-of-the-art for NLP tasks, like text classification, translation, summarization, and question answering.\n\nNow that BERT's been added to [TF Hub](https://www.tensorflow.org/hub) as a loadable module, it's easy(ish) to add into existing Tensorflow text pipelines. In an existing pipeline, BERT can replace text embedding layers like ELMO and GloVE. Alternatively, [finetuning](http://wiki.fast.ai/index.php/Fine_tuning) BERT can provide both an accuracy boost and faster training time in many cases.\n\nHere, we'll train a model to predict whether an IMDB movie review is positive or negative using BERT in Tensorflow with tf hub. Some code was adapted from [this colab notebook](https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb). Let's get started!", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom datetime import datetime\n\ntf.logging.set_verbosity(tf.logging.INFO)", "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 
1)])\n/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/usr/local/lib/python3.6/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" ] ], [ [ "In addition to the standard libraries we imported above, we'll need to install BERT's python package.", "_____no_output_____" ] ], [ [ "!pip install bert-tensorflow", "Requirement already satisfied: bert-tensorflow in /usr/local/lib/python3.6/dist-packages (1.0.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from bert-tensorflow) (1.12.0)\n\u001b[33mWARNING: You are using pip version 19.2.1, however version 20.0.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ], [ "import bert\nfrom bert import run_classifier\nfrom bert import optimization\nfrom bert import tokenization", "WARNING: Logging before flag parsing goes to stderr.\nW0414 10:19:55.760469 140105573619520 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/bert/optimization.py:87: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\n" ] ], [ [ "Below, we'll set an output directory location to store our model output and checkpoints. This can be a local directory, in which case you'd set OUTPUT_DIR to the name of the directory you'd like to create. If you're running this code in Google's hosted Colab, the directory won't persist after the Colab session ends.\n\nAlternatively, if you're a GCP user, you can store output in a GCP bucket. 
To do that, set a directory name in OUTPUT_DIR and the name of the GCP bucket in the BUCKET field.\n\nSet DO_DELETE to rewrite the OUTPUT_DIR if it exists. Otherwise, Tensorflow will load existing model checkpoints from that directory (if they exist).", "_____no_output_____" ] ], [ [ "# Set the output directory for saving model file\n# Optionally, set a GCP bucket location\n\nOUTPUT_DIR = 'output_files'#@param {type:\"string\"}\n#@markdown Whether or not to clear/delete the directory and create a new one\nDO_DELETE = False #@param {type:\"boolean\"}\n#@markdown Set USE_BUCKET and BUCKET if you want to (optionally) store model output on GCP bucket.\nUSE_BUCKET = False #@param {type:\"boolean\"}\nBUCKET = 'BUCKET_NAME' #@param {type:\"string\"}\n\nif USE_BUCKET:\n OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET, OUTPUT_DIR)\n from google.colab import auth\n auth.authenticate_user()\n\nif DO_DELETE:\n try:\n tf.gfile.DeleteRecursively(OUTPUT_DIR)\n except:\n # Doesn't matter if the directory didn't exist\n pass\ntf.gfile.MakeDirs(OUTPUT_DIR)\nprint('***** Model output directory: {} *****'.format(OUTPUT_DIR))\n", "***** Model output directory: output_files *****\n" ] ], [ [ "#Data", "_____no_output_____" ], [ "First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this Tensorflow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub).", "_____no_output_____" ] ], [ [ "from tensorflow import keras\nimport os\nimport re\n\n# Load all files from a directory in a DataFrame.\ndef load_directory_data(directory):\n data = {}\n data[\"sentence\"] = []\n data[\"sentiment\"] = []\n for file_path in os.listdir(directory):\n with tf.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\n data[\"sentence\"].append(f.read())\n data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\n return pd.DataFrame.from_dict(data)\n\n# Merge positive and negative examples, add a polarity column and shuffle.\ndef load_dataset(directory):\n pos_df = load_directory_data(os.path.join(directory, \"pos\"))\n neg_df = load_directory_data(os.path.join(directory, \"neg\"))\n pos_df[\"polarity\"] = 1\n neg_df[\"polarity\"] = 0\n return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\n\n# Download and process the dataset files.\ndef download_and_load_datasets(force_download=False):\n dataset = tf.keras.utils.get_file(\n fname=\"aclImdb.tar.gz\", \n origin=\"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\", \n extract=True)\n \n train_df = load_dataset(os.path.join(os.path.dirname(dataset), \n \"aclImdb\", \"train\"))\n test_df = load_dataset(os.path.join(os.path.dirname(dataset), \n \"aclImdb\", \"test\"))\n \n return train_df, test_df\n", "_____no_output_____" ], [ "# train, test = download_and_load_datasets()", "_____no_output_____" ], [ "import pandas as pd\ndef load_dataset():\n train_df = pd.read_csv('data/train.csv')\n test_df = pd.read_csv('data/valid.csv')\n \n return train_df, test_df\n\ntrain, test = load_dataset()", "_____no_output_____" ] ], [ [ "To keep training fast, we'll take a sample of 5000 train and test examples, respectively.", "_____no_output_____" ] ], [ [ "# train = train.sample(5000)\n# test = test.sample(5000)", "_____no_output_____" ], [ "train.columns", "_____no_output_____" ] ], [ [ "For us, our input data is the 'sentence' column and our label is the 'polarity' column (0, 1 for negative and positive, 
respecitvely)", "_____no_output_____" ] ], [ [ "DATA_COLUMN = 'text'\nLABEL_COLUMN = 'stars'\n# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'\nlabel_list = [1, 2, 3, 4, 5]", "_____no_output_____" ] ], [ [ "#Data Preprocessing\nWe'll need to transform our data into a format BERT understands. This involves two steps. First, we create `InputExample`'s using the constructor provided in the BERT library.\n\n- `text_a` is the text we want to classify, which in this case, is the `Request` field in our Dataframe. \n- `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.\n- `label` is the label for our example, i.e. True, False", "_____no_output_____" ] ], [ [ "# Use the InputExample class from BERT's run_classifier code to create examples from the data\ntrain_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example\n text_a = x[DATA_COLUMN], \n text_b = None, \n label = x[LABEL_COLUMN]), axis = 1)\n\ntest_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None, \n text_a = x[DATA_COLUMN], \n text_b = None, \n label = x[LABEL_COLUMN]), axis = 1)", "_____no_output_____" ] ], [ [ "Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):\n\n\n1. Lowercase our text (if we're using a BERT lowercase model)\n2. Tokenize it (i.e. \"sally says hi\" -> [\"sally\", \"says\", \"hi\"])\n3. Break words into WordPieces (i.e. \"calling\" -> [\"call\", \"##ing\"])\n4. Map our words to indexes using a vocab file that BERT provides\n5. Add special \"CLS\" and \"SEP\" tokens (see the [readme](https://github.com/google-research/bert))\n6. Append \"index\" and \"segment\" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))\n\nHappily, we don't have to worry about most of these details.\n\n\n", "_____no_output_____" ], [ "To start, we'll need to load a vocabulary file and lowercasing information directly from the BERT tf hub module:", "_____no_output_____" ] ], [ [ "# This is a path to an uncased (all lowercase) version of BERT\nBERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n\ndef create_tokenizer_from_hub_module():\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(BERT_MODEL_HUB)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n \n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\ntokenizer = create_tokenizer_from_hub_module()", "I0414 10:19:59.282109 140105573619520 saver.py:1499] Saver not created because there are no variables in the graph to restore\nW0414 10:20:00.810749 140105573619520 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/bert/tokenization.py:125: The name tf.gfile.GFile is deprecated. 
Please use tf.io.gfile.GFile instead.\n\n" ] ], [ [ "Great--we just learned that the BERT model we're using expects lowercase data (that's what stored in tokenization_info[\"do_lower_case\"]) and we also loaded BERT's vocab file. We also created a tokenizer, which breaks words into word pieces:", "_____no_output_____" ] ], [ [ "tokenizer.tokenize(\"This here's an example of using the BERT tokenizer\")", "_____no_output_____" ] ], [ [ "Using our tokenizer, we'll call `run_classifier.convert_examples_to_features` on our InputExamples to convert them into features BERT understands.", "_____no_output_____" ] ], [ [ "# We'll set sequences to be at most 128 tokens long.\nMAX_SEQ_LENGTH = 128\n# Convert our train and test features to InputFeatures that BERT understands.\ntrain_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\ntest_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)", "W0414 10:20:00.919758 140105573619520 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/bert/run_classifier.py:774: The name tf.logging.info is deprecated. Please use tf.compat.v1.logging.info instead.\n\nI0414 10:20:00.921136 140105573619520 run_classifier.py:774] Writing example 0 of 20000\nI0414 10:20:00.929281 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:00.930946 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:00.931859 140105573619520 run_classifier.py:464] tokens: [CLS] \" what do you do for recreation ? \" \" oh , the usual . i bowl . drive around . the occasional acid flashback . \" i do not actually like bowling . it ' s my nature . i only like things that i am good at and i am not even certain i would like to be good at bowling so i had my reservations when being invited to hang with the optimism club kids but i love these kids so . . . \" fuck it , dude , let ' s go bowling . \" i do have to admit ; i really had a blast here . 
it is vintage and sc ##um ##my on the outside which to me [SEP]\nI0414 10:20:00.932403 140105573619520 run_classifier.py:465] input_ids: 101 1000 2054 2079 2017 2079 2005 8640 1029 1000 1000 2821 1010 1996 5156 1012 1045 4605 1012 3298 2105 1012 1996 8138 5648 21907 1012 1000 1045 2079 2025 2941 2066 9116 1012 2009 1005 1055 2026 3267 1012 1045 2069 2066 2477 2008 1045 2572 2204 2012 1998 1045 2572 2025 2130 3056 1045 2052 2066 2000 2022 2204 2012 9116 2061 1045 2018 2026 17829 2043 2108 4778 2000 6865 2007 1996 27451 2252 4268 2021 1045 2293 2122 4268 2061 1012 1012 1012 1000 6616 2009 1010 12043 1010 2292 1005 1055 2175 9116 1012 1000 1045 2079 2031 2000 6449 1025 1045 2428 2018 1037 8479 2182 1012 2009 2003 13528 1998 8040 2819 8029 2006 1996 2648 2029 2000 2033 102\nI0414 10:20:00.932891 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\nI0414 10:20:00.933523 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.934118 140105573619520 run_classifier.py:468] label: 4 (id = 3)\nI0414 10:20:00.935426 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:00.936100 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:00.936616 140105573619520 run_classifier.py:464] tokens: [CLS] the burger ##s are good and they have good fries too . i used to get the ib ##ion rings but i can ' t eat onion now unfortunately . would recommend to anyone for lunch . [SEP]\nI0414 10:20:00.937265 140105573619520 run_classifier.py:465] input_ids: 101 1996 15890 2015 2024 2204 1998 2027 2031 2204 22201 2205 1012 1045 2109 2000 2131 1996 21307 3258 7635 2021 1045 2064 1005 1056 4521 20949 2085 6854 1012 2052 16755 2000 3087 2005 6265 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.937844 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.938503 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.939382 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:00.942288 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:00.942847 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:00.943509 140105573619520 run_classifier.py:464] tokens: [CLS] been there three times , twice they didn ' t have gu ##aca ##mo ##le . took the family on thursday night to hear live reggae , which they had confirmed when i called earlier in the day . ordered gu ##ac . . . 
no gu ##ac . ordered beef with mole . . . we ' re out of mole . wife ordered car ##ni ##tas , after long wait , waitress came back and informed her they were out of car ##ni ##tas . i think any problems relate to lai ##sse ##z fair ##e management . service is slow because they don ' t pay for enough staff . food takes long b / c there is only room for one cook [SEP]\nI0414 10:20:00.944194 140105573619520 run_classifier.py:465] input_ids: 101 2042 2045 2093 2335 1010 3807 2027 2134 1005 1056 2031 19739 19629 5302 2571 1012 2165 1996 2155 2006 9432 2305 2000 2963 2444 15662 1010 2029 2027 2018 4484 2043 1045 2170 3041 1999 1996 2154 1012 3641 19739 6305 1012 1012 1012 2053 19739 6305 1012 3641 12486 2007 16709 1012 1012 1012 2057 1005 2128 2041 1997 16709 1012 2564 3641 2482 3490 10230 1010 2044 2146 3524 1010 13877 2234 2067 1998 6727 2014 2027 2020 2041 1997 2482 3490 10230 1012 1045 2228 2151 3471 14396 2000 21110 11393 2480 4189 2063 2968 1012 2326 2003 4030 2138 2027 2123 1005 1056 3477 2005 2438 3095 1012 2833 3138 2146 1038 1013 1039 2045 2003 2069 2282 2005 2028 5660 102\nI0414 10:20:00.944772 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\nI0414 10:20:00.945364 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.945957 140105573619520 run_classifier.py:468] label: 2 (id = 1)\nI0414 10:20:00.947746 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:00.948391 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:00.948909 140105573619520 run_classifier.py:464] tokens: [CLS] pretty excited to discover this cal ##i gas ##tro ##pu ##b was arriving at the swan ##ky downtown summer ##lin . great beers and our server was not only fun but extremely knowledge ##able and turned me on to the w ##him ##sic ##al and ta ##sty golden monkey . outdoor patio is great place for a late night meal . and best of al locals get half off their bill on mondays . how good is that ? 
[SEP]\nI0414 10:20:00.949487 140105573619520 run_classifier.py:465] input_ids: 101 3492 7568 2000 7523 2023 10250 2072 3806 13181 14289 2497 2001 7194 2012 1996 10677 4801 5116 2621 4115 1012 2307 18007 1998 2256 8241 2001 2025 2069 4569 2021 5186 3716 3085 1998 2357 2033 2006 2000 1996 1059 14341 19570 2389 1998 11937 21756 3585 10608 1012 7254 19404 2003 2307 2173 2005 1037 2397 2305 7954 1012 1998 2190 1997 2632 10575 2131 2431 2125 2037 3021 2006 28401 1012 2129 2204 2003 2008 1029 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.950110 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.950884 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.951302 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:00.953340 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:00.954030 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:00.954628 140105573619520 run_classifier.py:464] tokens: [CLS] i have been very pleased with the care that i receive here . i have been going to them since 2002 for five different reasons and i love them . they are thorough , caring and very dedicated to see you recover completely . their front office staff is amazing . they will work with you to assure that you are getting everything you need and make sure that your insurance and referring physician are informed about your care . i have had pablo and cory as my therapist and i would recommend them to anyone that needs physical therapy . 
[SEP]\nI0414 10:20:00.955208 140105573619520 run_classifier.py:465] input_ids: 101 1045 2031 2042 2200 7537 2007 1996 2729 2008 1045 4374 2182 1012 1045 2031 2042 2183 2000 2068 2144 2526 2005 2274 2367 4436 1998 1045 2293 2068 1012 2027 2024 16030 1010 11922 1998 2200 4056 2000 2156 2017 8980 3294 1012 2037 2392 2436 3095 2003 6429 1012 2027 2097 2147 2007 2017 2000 14306 2008 2017 2024 2893 2673 2017 2342 1998 2191 2469 2008 2115 5427 1998 7727 7522 2024 6727 2055 2115 2729 1012 1045 2031 2018 11623 1998 18342 2004 2026 19294 1998 1045 2052 16755 2068 2000 3087 2008 3791 3558 7242 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.955861 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.956392 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:00.956864 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:16.874609 140105573619520 run_classifier.py:774] Writing example 10000 of 20000\nI0414 10:20:32.753437 140105573619520 run_classifier.py:774] Writing example 0 of 2000\nI0414 10:20:32.754873 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:32.755411 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:32.756034 140105573619520 run_classifier.py:464] tokens: [CLS] food is sometimes sometimes great and something ##s just good . wasn ' t a fan of their br ##un ##ch but lunch and dinner is good . service is hit and miss . [SEP]\nI0414 10:20:32.756607 140105573619520 run_classifier.py:465] input_ids: 101 2833 2003 2823 2823 2307 1998 2242 2015 2074 2204 1012 2347 1005 1056 1037 5470 1997 2037 7987 4609 2818 2021 6265 1998 4596 2003 2204 1012 2326 2003 2718 1998 3335 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.757318 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.757845 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.758347 140105573619520 run_classifier.py:468] label: 4 (id = 3)\nI0414 10:20:32.759270 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:32.759799 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:32.760307 140105573619520 run_classifier.py:464] tokens: [CLS] they should seriously market the sausage and gr ##av ##y . you may hear chicken sausage but - good lord this is some amazing sausage biscuits and gr ##av ##y . 
[SEP]\nI0414 10:20:32.760833 140105573619520 run_classifier.py:465] input_ids: 101 2027 2323 5667 3006 1996 24165 1998 24665 11431 2100 1012 2017 2089 2963 7975 24165 2021 1011 2204 2935 2023 2003 2070 6429 24165 27529 1998 24665 11431 2100 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.761393 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.762058 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.762672 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:32.766637 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:32.767348 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:32.767922 140105573619520 run_classifier.py:464] tokens: [CLS] while i have had several average salon and spa services at holt ##s this review is for my hair coloring experiences with ina . i have had my blonde highlights done by ina on and off for a few years . i was always very happy with the results , yet i was kept trying to look for a more reasonably priced place because it is very expensive indeed ( in my case about $ 300 with the tone ##r and blow dry , which is not included ) . and so it was that a month before my wedding i went to a very rep ##utable downtown salon ( supposedly the best in to ) to have my highlights done . 
to make a very [SEP]\nI0414 10:20:32.768587 140105573619520 run_classifier.py:465] input_ids: 101 2096 1045 2031 2018 2195 2779 11090 1998 12403 2578 2012 12621 2015 2023 3319 2003 2005 2026 2606 22276 6322 2007 27118 1012 1045 2031 2018 2026 9081 11637 2589 2011 27118 2006 1998 2125 2005 1037 2261 2086 1012 1045 2001 2467 2200 3407 2007 1996 3463 1010 2664 1045 2001 2921 2667 2000 2298 2005 1037 2062 16286 21125 2173 2138 2009 2003 2200 6450 5262 1006 1999 2026 2553 2055 1002 3998 2007 1996 4309 2099 1998 6271 4318 1010 2029 2003 2025 2443 1007 1012 1998 2061 2009 2001 2008 1037 3204 2077 2026 5030 1045 2253 2000 1037 2200 16360 23056 5116 11090 1006 10743 1996 2190 1999 2000 1007 2000 2031 2026 11637 2589 1012 2000 2191 1037 2200 102\nI0414 10:20:32.769200 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\nI0414 10:20:32.769784 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.770305 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:32.772956 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:32.773533 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:32.774001 140105573619520 run_classifier.py:464] tokens: [CLS] i have been coming to nc ##s for almost two years . i had been going to a salon in the mall for about a year and i noticed that my hair was getting extremely dry . i used some products that they recommended but nothing seemed to help . a friend recommended sara d so i made an appointment with her hoping that she would be able to make some suggestions for my dry and damaged hair . she suggested one product and gave me tips for drying and styling that would help . since that first visit , my hair has come along way . 
sara always gives me suggestions for keeping my hair healthy and she ' ll recommend different products in the [SEP]\nI0414 10:20:32.774591 140105573619520 run_classifier.py:465] input_ids: 101 1045 2031 2042 2746 2000 13316 2015 2005 2471 2048 2086 1012 1045 2018 2042 2183 2000 1037 11090 1999 1996 6670 2005 2055 1037 2095 1998 1045 4384 2008 2026 2606 2001 2893 5186 4318 1012 1045 2109 2070 3688 2008 2027 6749 2021 2498 2790 2000 2393 1012 1037 2767 6749 7354 1040 2061 1045 2081 2019 6098 2007 2014 5327 2008 2016 2052 2022 2583 2000 2191 2070 15690 2005 2026 4318 1998 5591 2606 1012 2016 4081 2028 4031 1998 2435 2033 10247 2005 17462 1998 20724 2008 2052 2393 1012 2144 2008 2034 3942 1010 2026 2606 2038 2272 2247 2126 1012 7354 2467 3957 2033 15690 2005 4363 2026 2606 7965 1998 2016 1005 2222 16755 2367 3688 1999 1996 102\nI0414 10:20:32.775171 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\nI0414 10:20:32.775726 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.776291 140105573619520 run_classifier.py:468] label: 5 (id = 4)\nI0414 10:20:32.778075 140105573619520 run_classifier.py:461] *** Example ***\nI0414 10:20:32.778569 140105573619520 run_classifier.py:462] guid: None\nI0414 10:20:32.779173 140105573619520 run_classifier.py:464] tokens: [CLS] this place should be renamed to \" we take our sweet ass time \" large fried chicken . j ##ks , but seriously . there was no line , and only 3 people waiting in front of me for their order , and it still took a good 15 minutes to receive my order . i ordered the regular chicken w / the strawberry drink ( forgot what it ' s called ) . the chicken was pretty good , i must admit . the drink , however , was decent - tasted like strawberry sp ##rite - but not worth ~ $ 3 . 50 . come here only if you have time to spare . 
[SEP]\nI0414 10:20:32.779727 140105573619520 run_classifier.py:465] input_ids: 101 2023 2173 2323 2022 4096 2000 1000 2057 2202 2256 4086 4632 2051 1000 2312 13017 7975 1012 1046 5705 1010 2021 5667 1012 2045 2001 2053 2240 1010 1998 2069 1017 2111 3403 1999 2392 1997 2033 2005 2037 2344 1010 1998 2009 2145 2165 1037 2204 2321 2781 2000 4374 2026 2344 1012 1045 3641 1996 3180 7975 1059 1013 1996 16876 4392 1006 9471 2054 2009 1005 1055 2170 1007 1012 1996 7975 2001 3492 2204 1010 1045 2442 6449 1012 1996 4392 1010 2174 1010 2001 11519 1011 12595 2066 16876 11867 17625 1011 2021 2025 4276 1066 1002 1017 1012 2753 1012 2272 2182 2069 2065 2017 2031 2051 2000 8622 1012 102 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.780362 140105573619520 run_classifier.py:466] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.780905 140105573619520 run_classifier.py:467] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nI0414 10:20:32.781480 140105573619520 run_classifier.py:468] label: 3 (id = 2)\n" ] ], [ [ "#Creating a model\n\nNow that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our sentiment task (i.e. classifying whether a movie review is positive or negative). 
This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning).", "_____no_output_____" ] ], [ [ "def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,\n num_labels):\n \"\"\"Creates a classification model.\"\"\"\n\n bert_module = hub.Module(\n BERT_MODEL_HUB,\n trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predicted_labels, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, predicted_labels, log_probs)\n", "_____no_output_____" ] ], [ [ "Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction.", "_____no_output_____" ] ], [ [ "# model_fn_builder actually creates our model function\n# using the passed parameters for num_labels, learning_rate, etc.\ndef model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n \n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics. 
\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels) \n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels) \n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels) \n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn\n", "_____no_output_____" ], [ "# Compute train and warmup steps from batch size\n# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\nBATCH_SIZE = 32\nLEARNING_RATE = 2e-5\nNUM_TRAIN_EPOCHS = 3.0\n# Warmup is a period of time where hte learning rate \n# is small and gradually increases--usually helps training.\nWARMUP_PROPORTION = 0.1\n# Model configs\nSAVE_CHECKPOINTS_STEPS = 500\nSAVE_SUMMARY_STEPS = 100", "_____no_output_____" ], [ "# Compute # train and warmup steps from batch size\nnum_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\nnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)", "_____no_output_____" ], [ "# Specify outpit directory and number of checkpoint steps to save\nrun_config = tf.estimator.RunConfig(\n model_dir=OUTPUT_DIR,\n save_summary_steps=SAVE_SUMMARY_STEPS,\n save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)", "_____no_output_____" ], [ "model_fn = model_fn_builder(\n num_labels=len(label_list),\n learning_rate=LEARNING_RATE,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps)\n\nestimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={\"batch_size\": BATCH_SIZE})\n", "I0414 10:20:36.028532 140105573619520 estimator.py:209] Using config: {'_model_dir': 'output_files', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 500, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n }\n}\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_service': None, '_cluster_spec': 
<tensorflow.python.training.server_lib.ClusterSpec object at 0x7f6be0248208>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n" ] ], [ [ "Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with Tensorflow [Estimators](https://www.tensorflow.org/guide/estimators).", "_____no_output_____" ] ], [ [ "# Create an input function for training. drop_remainder = True for using TPUs.\ntrain_input_fn = bert.run_classifier.input_fn_builder(\n features=train_features,\n seq_length=MAX_SEQ_LENGTH,\n is_training=True,\n drop_remainder=False)", "_____no_output_____" ] ], [ [ "Now we train our model! For me, using a Colab notebook running on Google's GPUs, my training time was about 14 minutes.", "_____no_output_____" ] ], [ [ "print(f'Beginning Training!')\ncurrent_time = datetime.now()\nestimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\nprint(\"Training took time \", datetime.now() - current_time)", "W0414 10:20:36.153037 140105573619520 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/training_util.py:236: Variable.initialized_value (from tensorflow.python.ops.variables) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.\n" ] ], [ [ "Now let's use our test data to see how well our model did:", "_____no_output_____" ] ], [ [ "test_input_fn = run_classifier.input_fn_builder(\n features=test_features,\n seq_length=MAX_SEQ_LENGTH,\n is_training=False,\n drop_remainder=False)", "_____no_output_____" ], [ "estimator.evaluate(input_fn=test_input_fn, steps=None)", "_____no_output_____" ] ], [ [ "Now let's write code to make predictions on new sentences:", "_____no_output_____" ] ], [ [ "def getPrediction(in_sentences):\n labels = [\"Negative\", \"Positive\"]\n input_examples = [run_classifier.InputExample(guid=\"\", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, \"\" is just a dummy label\n input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n predictions = estimator.predict(predict_input_fn)\n return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]", "_____no_output_____" ], [ "pred_sentences = [\n \"That movie was absolutely awful\",\n \"The acting was a bit lacking\",\n \"The film was creative and surprising\",\n \"Absolutely fantastic!\"\n]", "_____no_output_____" ], [ "predictions = getPrediction(pred_sentences)", "_____no_output_____" ] ], [ [ "Voila! We have a sentiment classifier!", "_____no_output_____" ] ], [ [ "predictions", "_____no_output_____" ] ] ]
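Editor's note on the prediction helper in the notebook above: `getPrediction` still carries the binary `labels = ["Negative", "Positive"]` list and the dummy `label = 0` from the original movie-review tutorial, while this notebook trains with `label_list = [1, 2, 3, 4, 5]`; the dummy label is not a member of `label_list` and the predicted class index can exceed the two-element list. The sketch below is a hedged adaptation (not the author's code) that assumes the `run_classifier`, `tokenizer`, `estimator`, `label_list` and `MAX_SEQ_LENGTH` defined earlier; the function name is illustrative.

```python
# Hedged sketch: prediction helper adapted to the 1-5 star labels used in training.
# Assumes run_classifier, tokenizer, estimator, label_list and MAX_SEQ_LENGTH from above.
def get_star_predictions(in_sentences):
    # The dummy label is ignored at prediction time, but it must be a member of label_list.
    input_examples = [run_classifier.InputExample(guid="", text_a=s, text_b=None,
                                                  label=label_list[0])
                      for s in in_sentences]
    input_features = run_classifier.convert_examples_to_features(
        input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    predict_input_fn = run_classifier.input_fn_builder(
        features=input_features, seq_length=MAX_SEQ_LENGTH,
        is_training=False, drop_remainder=False)
    predictions = estimator.predict(predict_input_fn)
    # prediction['labels'] is a class index 0..4; map it back to the 1-5 star value.
    return [(sentence, prediction['probabilities'], label_list[prediction['labels']])
            for sentence, prediction in zip(in_sentences, predictions)]
```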
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d058a9333c22bf25fb4124948c991596dfa1d188
7502
ipynb
Jupyter Notebook
notebooks/Clairvoyant.ipynb
oughtinc/psj
e7c5e987039ce7978234e137167991a61371604b
[ "MIT" ]
5
2018-07-16T23:01:40.000Z
2019-08-18T14:49:06.000Z
notebooks/Clairvoyant.ipynb
oughtinc/psj
e7c5e987039ce7978234e137167991a61371604b
[ "MIT" ]
1
2018-07-09T17:33:52.000Z
2018-07-09T17:33:52.000Z
notebooks/Clairvoyant.ipynb
oughtinc/psj
e7c5e987039ce7978234e137167991a61371604b
[ "MIT" ]
null
null
null
36.417476
174
0.603306
[ [ [ "import numpy as np\nimport pandas as pd\nimport scipy\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime\n%matplotlib inline\nimport matplotlib\nfrom datetime import datetime\nimport os\nfrom scipy import stats\n\nfrom definitions import HUMAN_DATA_DIR, ROOT_DIR\nfrom data.load_from_csv import get_content_datasets", "_____no_output_____" ], [ "def ClairvoyantCF(test_dataset, train_dataset, answers_dict):\n \"\"\"Takes datasets and {item_id: True/False} dict and returns\n mean mse simply predicting 0/100\"\"\"\n total_score = 0\n for i, rating in enumerate(test_dataset.ratings):\n try:\n if answers_dict[test_dataset.item_ids[i]]:\n total_score += (rating[2] - 1.0)**2\n else:\n total_score += (rating[2] - 0)**2\n except:\n print(i, test_dataset.item_ids[i])\n mean_mse = total_score / len(test_dataset.ratings)\n print(\"Using Clairvoyant CF, got total val score {:.3f}\".format(mean_mse))\n return\n\ndef ClairvoyantAdjustedCF(test_dataset, train_dataset, answers_dict):\n \"\"\"Takes datasets and {item_id: True/False} dict and returns\n mean mse simply predicting 0/100\"\"\"\n tot_true = 0\n tot_false = 0\n true_count = 0\n false_count = 0\n \n for i, rating in enumerate(train_dataset.ratings):\n if not np.isnan(rating[2]):\n if answers_dict[train_dataset.item_ids[i]]:\n tot_true += rating[2]\n true_count += 1\n else:\n tot_false += rating[2]\n false_count += 1 \n avg_true = tot_true / true_count\n avg_false = tot_false / false_count\n \n total_score = 0\n for i, rating in enumerate(test_dataset.ratings):\n if answers_dict[test_dataset.item_ids[i]]:\n total_score += (rating[2] - avg_true)**2\n else:\n total_score += (rating[2] - avg_false)**2\n mean_mse = total_score / len(test_dataset.ratings)\n print(\"Using Clairvoyant Adjusted CF, got total val score {:.3f}\".format(mean_mse))\n return", "_____no_output_____" ], [ "fermi_answers = pd.read_csv(os.path.join(HUMAN_DATA_DIR, 'fermi', 'answers.csv')).drop('Unnamed: 0', axis=1).set_index('item_id').T.to_dict('index')['answer']\npolitifact_answers = pd.read_csv(os.path.join(HUMAN_DATA_DIR, 'politifact', 'answers.csv')).drop('Unnamed: 0', axis=1).set_index('item_id').T.to_dict('index')['answer']", "_____no_output_____" ], [ "## Fermi \nprint('Fermi\\nUnmasked:')\nunmasked_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='unmasked')\nClairvoyantCF(unmasked_val_fermi, unmasked_fermi, fermi_answers)\nClairvoyantAdjustedCF(unmasked_val_fermi, unmasked_fermi, fermi_answers)\nprint('\\nLight Masking:')\nlight_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='light')\nClairvoyantCF(unmasked_val_fermi, light_fermi, fermi_answers)\nClairvoyantAdjustedCF(unmasked_val_fermi, light_fermi, fermi_answers)\nprint('\\nHeavy Masking:')\nheavy_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='heavy')\nClairvoyantCF(unmasked_val_fermi, heavy_fermi, fermi_answers)\nClairvoyantAdjustedCF(unmasked_val_fermi, heavy_fermi, fermi_answers)", "Fermi\nUnmasked:\nUsing Clairvoyant CF, got total val score 0.216\nUsing Clairvoyant Adjusted CF, got total val score 0.111\n\nLight Masking:\nUsing Clairvoyant CF, got total val score 0.216\nUsing Clairvoyant Adjusted CF, got total val score 0.111\n\nHeavy Masking:\nUsing Clairvoyant CF, got total val score 0.216\nUsing Clairvoyant Adjusted CF, got total val score 0.111\n" ], [ "## Politifact\nprint('Politifact\\nUnmasked:')\nunmasked_politifact, unmasked_val_politifact, _ = 
get_content_datasets(task='politifact', sparsity='unmasked')\nClairvoyantCF(unmasked_val_politifact, unmasked_politifact, politifact_answers)\nClairvoyantAdjustedCF(unmasked_val_politifact, unmasked_politifact, politifact_answers)\nprint('\\nPolitifact Masking:')\nlight_politifact, unmasked_val_politifact, _ = get_content_datasets(task='politifact', sparsity='light')\nClairvoyantCF(unmasked_val_politifact, light_politifact, politifact_answers)\nClairvoyantAdjustedCF(unmasked_val_politifact, light_politifact, politifact_answers)\nprint('\\nPolitifact Masking:')\nheavy_politifact, unmasked_val_politifact, _ = get_content_datasets(task='politifact', sparsity='heavy')\nClairvoyantCF(unmasked_val_politifact, heavy_politifact, politifact_answers)\nClairvoyantAdjustedCF(unmasked_val_politifact, heavy_politifact, politifact_answers)", "Politifact\nUnmasked:\nLoading w2v dict\nLoaded Word2Vec dict: 27.13s\nNumber of words in corpus: 400001\nUsing Clairvoyant CF, got total val score 0.242\nUsing Clairvoyant Adjusted CF, got total val score 0.112\n\nPolitifact Masking:\nLoading w2v dict\nLoaded Word2Vec dict: 23.40s\nNumber of words in corpus: 400001\nUsing Clairvoyant CF, got total val score 0.242\nUsing Clairvoyant Adjusted CF, got total val score 0.112\n\nPolitifact Masking:\nLoading w2v dict\nLoaded Word2Vec dict: 18.82s\nNumber of words in corpus: 400001\nUsing Clairvoyant CF, got total val score 0.242\nUsing Clairvoyant Adjusted CF, got total val score 0.112\n" ] ] ]
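The two baselines in the notebook above reduce to simple arithmetic: `ClairvoyantCF` scores the squared error of predicting 1.0 for items whose answer is True and 0.0 otherwise, while `ClairvoyantAdjustedCF` predicts the per-class mean rating estimated from the training split. A small self-contained illustration on made-up numbers (all values below are assumptions, not taken from the fermi or politifact data):

```python
import numpy as np

ratings = np.array([0.9, 0.1, 0.7, 0.2])        # held-out human judgements in [0, 1]
answers = np.array([True, False, True, False])  # ground-truth answer per rated item

# ClairvoyantCF: predict 1.0 for true items, 0.0 for false items.
preds_cf = np.where(answers, 1.0, 0.0)
mse_cf = np.mean((ratings - preds_cf) ** 2)

# ClairvoyantAdjustedCF: predict each class's mean rating (estimated on training data
# in the notebook; approximated here by the means of this toy sample).
avg_true, avg_false = ratings[answers].mean(), ratings[~answers].mean()
preds_adj = np.where(answers, avg_true, avg_false)
mse_adj = np.mean((ratings - preds_adj) ** 2)

print(f"clairvoyant MSE {mse_cf:.3f}, adjusted clairvoyant MSE {mse_adj:.3f}")
```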
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d058abc1c80a6205989478e4fa5c602aed6bf5b9
48607
ipynb
Jupyter Notebook
notebooks/data-prep/UCI-Daphnet.ipynb
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
2
2022-01-29T03:46:31.000Z
2022-02-14T14:06:35.000Z
notebooks/data-prep/UCI-Daphnet.ipynb
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
null
null
null
notebooks/data-prep/UCI-Daphnet.ipynb
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
null
null
null
47.794494
228
0.476228
[ [ [ "# UCI Daphnet dataset (Freezing of gait for Parkinson's disease patients)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport os\nfrom typing import List\nfrom pathlib import Path\nfrom config import data_raw_folder, data_processed_folder\nfrom timeeval import Datasets\nimport matplotlib\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "%matplotlib inline\nplt.rcParams['figure.figsize'] = (20, 10)", "_____no_output_____" ], [ "dataset_collection_name = \"Daphnet\"\nsource_folder = Path(data_raw_folder) / \"UCI ML Repository/Daphnet/dataset\"\ntarget_folder = Path(data_processed_folder)\n\nprint(f\"Looking for source datasets in {source_folder.absolute()} and\\nsaving processed datasets in {target_folder.absolute()}\")", "Looking for source datasets in /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset and\nsaving processed datasets in /home/projects/akita/data/benchmark-data/data-processed\n" ], [ "train_type = \"unsupervised\"\ntrain_is_normal = False\ninput_type = \"multivariate\"\ndatetime_index = True\ndataset_type = \"real\"\n\n# create target directory\ndataset_subfolder = os.path.join(input_type, dataset_collection_name)\ntarget_subfolder = os.path.join(target_folder, dataset_subfolder)\ntry:\n os.makedirs(target_subfolder)\n print(f\"Created directories {target_subfolder}\")\nexcept FileExistsError:\n print(f\"Directories {target_subfolder} already exist\")\n pass\n\ndm = Datasets(target_folder)\nexperiments = [f for f in source_folder.iterdir()]\nexperiments", "Directories /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet already exist\n" ], [ "columns = [\"timestamp\", \"ankle_horiz_fwd\", \"ankle_vert\", \"ankle_horiz_lateral\", \"leg_horiz_fwd\", \"leg_vert\", \"leg_horiz_lateral\",\n \"trunk_horiz_fwd\", \"trunk_vert\", \"trunk_horiz_lateral\", \"is_anomaly\"]\n\ndef transform_experiment_file(path: Path) -> List[pd.DataFrame]:\n df = pd.read_csv(path, sep=\" \", header=None)\n df.columns = columns\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"], unit=\"ms\")\n # slice out experiments (0 annotation shows unrelated data points (preparation/briefing/...))\n s_group = df[\"is_anomaly\"].isin([1, 2])\n s_diff = s_group.shift(-1) - s_group\n\n starts = (df[s_diff == 1].index + 1).values # first point has annotation 0 --> index + 1\n ends = df[s_diff == -1].index.values\n \n dfs = []\n for start, end in zip(starts, ends):\n df1 = df.iloc[start:end].copy()\n df1[\"is_anomaly\"] = (df1[\"is_anomaly\"] == 2).astype(int)\n dfs.append(df1)\n return dfs", "_____no_output_____" ], [ "for exp in experiments:\n # transform file to get datasets\n datasets = transform_experiment_file(exp)\n for i, df in enumerate(datasets):\n # get target filenames\n experiment_name = os.path.splitext(exp.name)[0]\n dataset_name = f\"{experiment_name}E{i}\"\n filename = f\"{dataset_name}.test.csv\"\n path = os.path.join(dataset_subfolder, filename)\n target_filepath = os.path.join(target_subfolder, filename)\n\n # calc length and save in file\n dataset_length = len(df)\n df.to_csv(target_filepath, index=False)\n print(f\"Processed source dataset {exp} -> {target_filepath}\")\n\n # save metadata\n dm.add_dataset((dataset_collection_name, dataset_name),\n train_path = None,\n test_path = path,\n dataset_type = dataset_type,\n datetime_index = datetime_index,\n split_at = None,\n train_type = train_type,\n train_is_normal = train_is_normal,\n input_type = input_type,\n dataset_length = 
dataset_length\n )\n\ndm.save()", "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S02R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S02R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S03R03.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S03R03E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S10R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S10R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S10R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S10R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S01R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S01R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S09R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S09R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S09R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S09R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S09R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S09R01E2.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S09R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S09R01E3.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S09R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S09R01E4.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S07R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S07R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S06R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S06R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S06R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S06R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S06R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S06R01E2.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S03R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S03R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S03R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S03R01E1.test.csv\nProcessed 
source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S02R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S02R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S07R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S07R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S04R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S04R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S04R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S04R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R01E2.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R01E3.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S08R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S08R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S08R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S08R01E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S08R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S08R01E2.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S08R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S08R01E3.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S06R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S06R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S06R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S06R02E1.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S01R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S01R01E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S01R01.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S01R01E1.test.csv\nProcessed source dataset 
/home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S03R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S03R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R02E0.test.csv\nProcessed source dataset /home/projects/akita/data/benchmark-data/data-raw/UCI ML Repository/Daphnet/dataset/S05R02.txt -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/Daphnet/S05R02E1.test.csv\n" ], [ "dm.refresh()\ndm.df().loc[(slice(dataset_collection_name,dataset_collection_name), slice(None))]", "_____no_output_____" ] ], [ [ "## Experimentation\n\nAnnotations\n\n- `0`: not part of the experiment.\n For instance the sensors are installed on the user or the user is performing activities unrelated to the experimental protocol, such as debriefing\n- `1`: experiment, no freeze (can be any of stand, walk, turn)\n- `2`: freeze", "_____no_output_____" ] ], [ [ "columns = [\"timestamp\", \"ankle_horiz_fwd\", \"ankle_vert\", \"ankle_horiz_lateral\", \"leg_horiz_fwd\", \"leg_vert\", \"leg_horiz_lateral\",\n \"trunk_horiz_fwd\", \"trunk_vert\", \"trunk_horiz_lateral\", \"annotation\"]\ndf1 = pd.read_csv(source_folder / \"S01R01.txt\", sep=' ', header=None)\ndf1.columns = columns\ndf1[\"timestamp\"] = pd.to_datetime(df1[\"timestamp\"], unit=\"ms\")\ndf1", "_____no_output_____" ], [ "columns = [c for c in columns if c not in [\"timestamp\", \"annotation\"]]\ndf_plot = df1.set_index(\"timestamp\", drop=True)#.loc[\"1970-01-01 00:15:00\":\"1970-01-01 00:16:00\"]\ndf_plot.plot(y=columns, figsize=(20,10))\ndf_plot[\"annotation\"].plot(secondary_y=True)\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "s_group = df1[\"annotation\"].isin([1, 2])\ns_diff = s_group.shift(-1) - s_group\n\nstarts = (df1[s_diff == 1].index + 1).values\nends = df1[s_diff == -1].index.values\nstarts, ends", "_____no_output_____" ], [ "dfs = [df1.iloc[start:end] for start, end in zip(starts, ends)]\nlen(dfs)", "_____no_output_____" ], [ "columns = [c for c in columns if c not in [\"timestamp\", \"annotation\"]]\nfor df in dfs:\n df = df.set_index(\"timestamp\", drop=True)\n df.plot(y=columns, figsize=(20,10))\n df[\"annotation\"].plot(secondary_y=True)\nplt.show()", "_____no_output_____" ] ] ]
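The experiment slicing in the notebook above works by differencing an in-experiment flag: `s_diff == 1` marks the row just before an experiment begins (hence the `index + 1`) and `s_diff == -1` marks the last row inside it. Because `df.iloc[start:end]` excludes the row at `end`, the final sample of each experiment appears to be dropped; the toy sketch below (annotation values assumed) shows the same transition logic with an inclusive end index:

```python
import pandas as pd

# 0 = outside the experiment, 1 = experiment / no freeze, 2 = freeze.
ann = pd.Series([0, 0, 1, 1, 2, 1, 0, 0, 1, 2, 0])

in_exp = ann.isin([1, 2]).astype(int)
diff = in_exp.shift(-1) - in_exp              # +1 just before a segment, -1 on its last row

starts = (ann[diff == 1].index + 1).values    # -> [2, 8]
ends = ann[diff == -1].index.values           # -> [5, 9]

segments = [ann.iloc[s:e + 1] for s, e in zip(starts, ends)]  # e + 1 keeps the last sample
print([seg.tolist() for seg in segments])     # [[1, 1, 2, 1], [1, 2]]
```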
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d058b61d1d479f098a1d68573b02b45e9118d4e2
78729
ipynb
Jupyter Notebook
.ipynb
AGreen0/BlackDeathProject
39ee198fa24c3a956084c22b5e1aa2f38b51f2a6
[ "BSL-1.0" ]
null
null
null
.ipynb
AGreen0/BlackDeathProject
39ee198fa24c3a956084c22b5e1aa2f38b51f2a6
[ "BSL-1.0" ]
null
null
null
.ipynb
AGreen0/BlackDeathProject
39ee198fa24c3a956084c22b5e1aa2f38b51f2a6
[ "BSL-1.0" ]
null
null
null
159.693712
23432
0.870543
[ [ [ "#### Amy Green - 200930437 \n\n# <center> 5990M: Introduction to Programming for Geographical Information Analysis - Core Skills </center>\n\n## <center><u> __**Assignment 2: Investigating the Black Death**__ </u></center>\n-------------------------------------------------------------", "_____no_output_____" ], [ "### Project Aim\n\n<p>The aim of the project hopes to build a model, based upon initial agent-based framework coding schemes, that generates an analysis into an aspect of 'The Black Death'. This project intends to calculate the fatalities from The Great Plague of London via the known population densities of London parishes in 1665. The generation of this measure from historical data will allow any correlation to be investigated and an overall map of total deaths to be produced. Furthermore, the final code should allow for manipulation in terms of changing parameter weights to investigate possible scenarios that could have ensued. </p>", "_____no_output_____" ], [ "### Context\n<p>The Great Plague of London (1665-1666) was the last occurrence of the fatal ‘Black Death’ Plague that swept across Europe in the 1300s. The bubonic plague caused an epidemic across the 17th century parishes of London, as well as some smaller areas of the UK. The overcrowded city and hot Summer became a breeding ground for the bacterium <i>Yersinia pestis</i> disseminated by rat fleas – the known cause of the plague. Transmission was inevitable due to the high poverty levels, low sanitation rates, and open sewers in closely packed waste-filled streets; especially in poorer areas (Trueman, 2015). Deaths started slowly within the St. Giles’s Parish but rose alarmingly as documented by the weekly ‘Bill of Mortality’ that was legally required from each parish at the time (Defoe, 2005). The numbers of deaths slowed after 18 months due to quarantines, much of the population moving to the country and the onset of Winter, however, the final end emerged due to the Great Fire of London destroying central parts of the city in September 1666. </p>\n", "_____no_output_____" ], [ "### Data Source\n<p>The calculation of the average death rate from the Great Plague will be generated from two raster maps. The model will be using known rat populations and average population densities of 16 different parishes within London, both from historical records, recorded by rat-catchers and parish figures in 1665, respectively. The original maps have data stored for each 400m x 400m area as text data, but the figures have been averaged to represent either the area covered by the Parish or the area within which the rat-catcher operates.\n \nThe relationship to calculate the average death rate from this source data is as follows: \n\n<b>Death Rate = (0.8 x Rat Population)(1.3 x Population Density) </b></p> \n", "_____no_output_____" ], [ "### Model Expectations \n\n<p>The model should first show maps of the original source data: the rat populations and population densities for the 16 investigated parishes. These maps will then be combined using the calculation to generate the average death rate from the Great Plague per week and will be mapped as an image. The final map will then be altered so the user will be able to manipulate the weights of either the rat population or the density population to envision how these alternate factors may change the overall death rate. 
</p>\n \n<p>The code should run on Windows.</p>\n", "_____no_output_____" ], [ "------------------------------------------------------------------------------\n### Part 1 - Read in Source Data", "_____no_output_____" ] ], [ [ "'''Step 1 - Set up initial imports for programme'''\nimport random\n%matplotlib inline \nimport matplotlib.pyplot \nimport matplotlib\nimport matplotlib.animation\nimport os\nimport requests\nimport tkinter\nimport pandas as pd #Shortened in standard python documentation format\nimport numpy as np #Shortened in standard python documentation format\nimport ipywidgets as widgets #Shortened in standard python documentation format\n", "_____no_output_____" ] ], [ [ "<p><u> Map 1</u> - Rat Populations (Average Rats caught per week) </p>", "_____no_output_____" ] ], [ [ "'''Step 2 - Import data for the rat populations and generate environment from the 2D array'''\n\n#Set up a base path for the import of the rats txt file\nbase_path = \"C:\\\\Users\\\\Home\\\\Documents\\\\MSc GIS\\\\Programming\\\\Black_Death\\\\BlackDeathProject\" #Basepath\ndeathrats = \"deathrats.txt\" #Saved filename\npath_to_file = os.path.join(base_path, deathrats)\nf = open(path_to_file , 'r')\n#mapA = f.read()\n#print(mapA) #Test to show data has imported\n\n#Set up an environment to read the rats txt file into - this is called environmentA\nenvironmentA = []\nfor line in f:\n parsed_line = str.split(line, \",\") #Split values up via commas\n rowlist = []\n for word in parsed_line:\n rowlist.append(float(word))\n environmentA.append(rowlist) #Append all lists individually so can print environment\nf.close()\n#print(environmentA) #Test environment appears and all lines run \n\n#Display environment of rat populations\nmatplotlib.pyplot.xlim(0, 400) #Set up x-axis\nmatplotlib.pyplot.ylim(0, 400) #Set up y-axis\nmatplotlib.pyplot.imshow(environmentA) #Shows the environment\nmatplotlib.pyplot.title('Average Rat Populations', loc='center') #Adds a centred title\nhsv() #Altered colourmap to red-yellow-green-cyan-blue-pink-magenta display, from original viridis: aids user interpretation\n", "_____no_output_____" ] ], [ [ "<p> This map contains the data for the average rat populations denoted from the amount of rats caught per week. The data is initially placed into a text file which can be seen through print(mapA), but then has been put into an environment which is shown. The different colours show the different amounts of rats, however, this will have more useful when combined with Map 2 in Part 2 when calculating the overall death rates. 
</p>", "_____no_output_____" ], [ "<p><u> Map 2</u> - Average Population Densities (per Parish) </p>", "_____no_output_____" ] ], [ [ "'''Step 3 - Import data for the parish population densities and generate environment from the 2D array'''\n\n#Set up a base path for the import of the parish txt file\n#base_path = \"C:\\\\Users\\\\Home\\\\Documents\\\\MSc GIS\\\\Programming\\\\Black_Death\\\\BlackDeathProject\" #Basepath\ndeathparishes = \"deathparishes.txt\" #Saved filename\npath_to_file = os.path.join(base_path, deathparishes)\nfd = open(path_to_file , 'r')\n#mapB = fd.read()\n#print(mapB) #Test to show data has imported\n\n#Set up an environment to read the parish txt file into - this is called environmentB\nenvironmentB = []\nfor line in fd:\n parsed_line = str.split(line, \",\") #Split values up via commas\n rowlist = []\n for word in parsed_line:\n rowlist.append(float(word))\n environmentB.append(rowlist) #Append all lists individually so can print environment\nf.close()\n#print(environmentB) #Test environment appears and all lines run \n\n#Display environment of parish populations\nmatplotlib.pyplot.xlim(0, 400) #Set up x-axis\nmatplotlib.pyplot.ylim(0, 400) #Set up y-axis\nmatplotlib.pyplot.imshow(environmentB) #Shows the environment\nmatplotlib.pyplot.title('Average Parish Population Densities', loc='center') #Adds a centred title\nhsv() #Altered colourmap to red-yellow-green-cyan-blue-pink-magenta display, from original viridis: aids user interpretation\n", "_____no_output_____" ] ], [ [ "<p> This map contains the data for the average population densities per the 16 parishes investigated. The data is initially placed into a text file which can be seen through print(mapB), but then has been put into an environment which is shown. The different colours show the different populations per parish. 
</p>", "_____no_output_____" ], [ "------------------------------------------------------------------------------\n### Part 2 - Calculate the Average Death Rate", "_____no_output_____" ] ], [ [ "'''Step 4 - Calculate Map of Average death rates '''\n\n#Sets up a list named results to append all calculated values to\nresult = []\n\nfor r in range(len(environmentA)):#Goes through both environments' (A and B) rows\n row_a = environmentA[r] \n row_b = environmentB[r]\n rowlist = []\n result.append(rowlist) #Append all lists individually so can merge values from environmentA and environmentB\n for c in range(len(row_a)): #Goes through both environments' (A and B) columns \n rats = row_a[c] \n parishes = row_b[c]\n # d = (0.8 x r) x (1.3 x p) Equation used to generate average death rate \n d = (0.8 * rats) * (1.3 * parishes) #Puts values through death average equation with initial set parameters\n rowlist.append(d)\n #print(d) #Test that results array shows \n\n'''Step 5 - Plot and show the average death rates'''\n\n#Sets up environment to display the results\nmatplotlib.pyplot.xlim(0, 400) #Set up x-axis\nmatplotlib.pyplot.ylim(0, 400) #Set up y-axis\nmatplotlib.pyplot.imshow(result) #Shows the environment\nmatplotlib.pyplot.title('Average Weekly Death Rates of the Great Plague', loc='center') #Adds a centred title\nhsv() #Altered colourmap to red-yellow-green-cyan-blue-pink-magenta display, from original viridis: aids user interpretation\n\n\n#To do: \n#Insert legend\n\n'''Step 6 - Save the average death rate results as a seperate txt.file'''\n\nnp.savetxt('result.txt', result, fmt='%-6.2f' , newline=\"\\r\\n\") #Each row should equal a new line on the map\n#Results have been padded to a width of 6 and rounded to 2 decimal points within the txt.file\n", "_____no_output_____" ] ], [ [ "<p> The output map within Part 2 displays the average death rate calculations within the 400x400 environment of the parishes investigated. The results array has been saved as a <i> result.txt </i> file (rounded to two decimal points) that can be manipulated and utilised for further investigation. 
</p> ", "_____no_output_____" ], [ "------------------------------------------------------------------------------\n### Part 3 - Display the Death Rate with Changing Parameters", "_____no_output_____" ] ], [ [ "'''Step 7 - Set up Rat Population Parameter Slider'''\n\n#Generate a slider for the rats parameter\nsR = widgets.FloatSlider(\n value=0.8, #Initial parameter value set by the equation\n min=0, #Minimum of range is 0\n max=5.0, #Maximum of range is 5\n step=0.1, #Values get to 1 decimal place increments\n description='Rats:', #Label for slider\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f', \n) \ndisplay(sR) #Dispays the parameter slider for rats that users can alter\n\n'''Step 8 - Set up Parish Population Density Parameter Slider'''\n\n#Generate a slider for the parish parameter\nsP = widgets.FloatSlider(\n value=1.3, #Initial parameter value set by the equation\n min=0, #Minimum of range is 0\n max=5.0, #Maximum of range is 5\n step=0.1, #Values get to 1 decimal place increments\n description='Parishes:', #Label for slider\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n) \ndisplay(sP) #Displays the parameter slider for parish population that users can alter ", "_____no_output_____" ] ], [ [ "<p> The sliders above are available to alter to investigate the relationship between the rat population values and the average population density amounts. These will then be the next set parameters when the proceeding cell is run. </p>", "_____no_output_____" ] ], [ [ "'''Step 9 - Display the Changed Parameters '''\n\n#Formatting to display parameter amounts to correlate to the underlying map\nprint('Changed Parameter Values')\nprint('Rats:', sR.value) \nprint('Parishes:', sP.value)\n\n\n'''Step 10 - Create a map of the death rate average with new changed parameters'''\n\n#Alter the results list to incorporate the altered parameter values\nresult = []\n\nfor r in range(len(environmentA)): #Goes through both environments' (A and B) rows\n row_a = environmentA[r]\n row_b = environmentB[r]\n rowlist = []\n result.append(rowlist) #Append all lists individually so can merge values from environmentA and environmentB\n for c in range(len(row_a)): #Goes through both environments' (A and B) columns\n rats = row_a[c] \n parishes = row_b[c]\n # d = (0.8 x r) x (1.3 x p) #Original equation used to generate average death rate \n d = (sR.value * rats) * (sP.value * parishes) #Updated equation to show the altered parameter values\n rowlist.append(d)\n #print(d) #Test that results array has updated\n \n#Set up a larger figure view of the final map\nfig = matplotlib.pyplot.figure(figsize=(7,7))\nax = fig.add_axes([0, 0, 1, 1])\n\nmatplotlib.pyplot.xlim(0, 400) #Set up x-axis\nmatplotlib.pyplot.ylim(0, 400) #Set up y-axis\nmatplotlib.pyplot.imshow(result) #Display the final map\nmatplotlib.pyplot.xlabel('Rat Populations') #Label the x-axis\nmatplotlib.pyplot.ylabel('Parish Densities') #Label the y-axis\nmatplotlib.pyplot.title('Average Weekly Death Rates of the Great Plague at Altered Parameters', loc='center') #Adds a centred title\nhsv() #Altered colourmap to red-yellow-green-cyan-blue-pink-magenta display, from original viridis: aids user interpretation\n\ndef update(d):\n d = (sR * rats)*(sP * parishes)\n rowlist.append(d) #Updates figure with new parameters\n \nprint('Average weekly death rate at these parameters =', round(d,2)) #Print the average weekly death rate with altered 
parameters to 2 decimal places\n \n", "Changed Parameter Values\nRats: 2.9\nParishes: 1.0\nAverage weekly death rate at these parameters = 29754.0\n" ] ], [ [ "The final map displays the average death rate of people within the 16 investigated parishes affected by the Great Plague of 1665. Changing the parameters generates a different total value, which makes the relationship interesting to explore. ", "_____no_output_____" ], [ "------------------------------------------------------------------------------\n### Conclusions and Review\n<p> The code runs smoothly and successfully generates an average weekly death rate, even when the parameter values are changed. The issue that arises is that the final map changes very little, beyond small shifts, when the parameters are altered. Therefore, to enhance the model further, the map display would be explored to show a clearer layout of the values, possibly via a line or correlation-style graph. This would enable the relationship between the rat populations and the parish population densities to be interrogated further. \n \n<i>n.b.</i> One practical issue with the model is that the base paths of the initial txt.file imports need to be altered when copying the code, as they are read from a saved folder into the Jupyter notebook. This is simple to do, just a tad annoying! </p>", "_____no_output_____" ], [ "#### References\n<ul type=\"circle\">\n<li><p> Defoe, D. 2005. <i>History of the Plague in London.</i> [Online]. USA: American Book Company. [Accessed 2/1/19]. Available from: <a href=\"http://www.gutenberg.org/files/17221/17221-h/17221-h.htm\">http://www.gutenberg.org/files/17221/17221-h/17221-h.htm</a>. </p></li>\n<li><p> Trueman, C.N. 2015. <i>The Plague of 1665.</i> [Online]. [Accessed 2/1/19]. Available from: <a href=\"https://www.historylearningsite.co.uk/stuart-england/the-plague-of-1665/\">https://www.historylearningsite.co.uk/stuart-england/the-plague-of-1665/</a>. </p></li>\n<li><p> Wikipedia. 2018. <i>Great Plague of London.</i> [Online]. [Accessed 2/1/19]. Available from: <a href=\"https://en.wikipedia.org/wiki/Great_Plague_of_London\">https://en.wikipedia.org/wiki/Great_Plague_of_London</a>. </p></li>\n</ul>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d058b9f7627a414d37f904f2d30e368ebfe446c0
141,968
ipynb
Jupyter Notebook
docs/T006054_SDLib.ipynb
sparsh-ai/recsys-attacks
d7472b7296515249c1bd1bbb8ea0afa9b07f6d9d
[ "Apache-2.0" ]
1
2022-03-06T07:18:25.000Z
2022-03-06T07:18:25.000Z
docs/T006054_SDLib.ipynb
sparsh-ai/recsys-attacks
d7472b7296515249c1bd1bbb8ea0afa9b07f6d9d
[ "Apache-2.0" ]
null
null
null
docs/T006054_SDLib.ipynb
sparsh-ai/recsys-attacks
d7472b7296515249c1bd1bbb8ea0afa9b07f6d9d
[ "Apache-2.0" ]
1
2022-03-06T07:17:55.000Z
2022-03-06T07:17:55.000Z
141,968
141,968
0.555076
[ [ [ "# SDLib\n> Shilling simulated attacks and detection methods", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "!mkdir -p results", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "from collections import defaultdict\nimport numpy as np\nimport random\nimport os\nimport os.path\nfrom os.path import abspath\nfrom os import makedirs,remove\nfrom re import compile,findall,split\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics.pairwise import pairwise_distances,cosine_similarity\nfrom numpy.linalg import norm\nfrom scipy.stats.stats import pearsonr\nfrom math import sqrt,exp\n\nimport sys\nfrom re import split\nfrom multiprocessing import Process,Manager\nfrom time import strftime,localtime,time\nimport re\n\nfrom os.path import abspath\nfrom time import strftime,localtime,time\nfrom sklearn.metrics import classification_report\nfrom re import split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom random import shuffle\nfrom sklearn.tree import DecisionTreeClassifier\nimport time as tm\n\nfrom sklearn.metrics import classification_report\nimport numpy as np\nfrom collections import defaultdict\nfrom math import log,exp\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom random import choice\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport random\n\nfrom sklearn.metrics import classification_report\nimport numpy as np\nfrom collections import defaultdict\nfrom math import log,exp\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\n\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nimport scipy\nfrom scipy.sparse import csr_matrix\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nimport math\nfrom sklearn.naive_bayes import GaussianNB", "_____no_output_____" ] ], [ [ "## Data", "_____no_output_____" ] ], [ [ "!mkdir -p dataset/amazon\n!cd dataset/amazon && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/amazon/profiles.txt\n!cd dataset/amazon && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/amazon/labels.txt", "profiles.txt.2 100%[===================>] 1.46M --.-KB/s in 0.01s \nlabels.txt.2 100%[===================>] 82.62K --.-KB/s in 0.002s \n" ], [ "!mkdir -p dataset/averageattack\n!cd dataset/averageattack && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/averageattack/ratings.txt\n!cd dataset/averageattack && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/averageattack/labels.txt", "ratings.txt 100%[===================>] 531.60K --.-KB/s in 0.007s \nlabels.txt 100%[===================>] 10.25K --.-KB/s in 0s \n" ], [ "!mkdir -p dataset/filmtrust\n!cd dataset/filmtrust && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/filmtrust/ratings.txt\n!cd dataset/filmtrust && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/filmtrust/trust.txt", "ratings.txt 100%[===================>] 367.62K --.-KB/s in 0.006s \ntrust.txt 100%[===================>] 19.15K --.-KB/s in 0s \n" ] ], [ [ "## Config", "_____no_output_____" ], [ "### Configure the Detection Method\n\n<div>\n <table class=\"table table-hover 
table-bordered\">\n <tr>\n <th width=\"12%\" scope=\"col\"> Entry</th>\n <th width=\"16%\" class=\"conf\" scope=\"col\">Example</th>\n <th width=\"72%\" class=\"conf\" scope=\"col\">Description</th>\n </tr>\n <tr>\n <td>ratings</td>\n <td>dataset/averageattack/ratings.txt</td>\n <td>Set the path to the dirty recommendation dataset. Format: each row separated by empty, tab or comma symbol. </td>\n </tr>\n <tr>\n <td>label</td>\n <td>dataset/averageattack/labels.txt</td>\n <td>Set the path to labels (for users). Format: each row separated by empty, tab or comma symbol. </td>\n </tr>\n <tr>\n <td scope=\"row\">ratings.setup</td>\n <td>-columns 0 1 2</td>\n <td>-columns: (user, item, rating) columns of rating data are used;\n -header: to skip the first head line when reading data<br>\n </td>\n </tr>\n\n <tr>\n <td scope=\"row\">MethodName</td>\n <td>DegreeSAD/PCASelect/etc.</td>\n <td>The name of the detection method<br>\n </td>\n </tr>\n <tr>\n <td scope=\"row\">evaluation.setup</td>\n <td>-testSet dataset/testset.txt</td>\n <td>Main option: -testSet, -ap, -cv <br>\n -testSet path/to/test/file (need to specify the test set manually)<br>\n -ap ratio (ap means that the user set (including items and ratings) are automatically partitioned into training set and test set, the number is the ratio of test set. e.g. -ap 0.2)<br>\n -cv k (-cv means cross validation, k is the number of the fold. e.g. -cv 5)<br>\n </td>\n </tr>\n\n <tr>\n <td scope=\"row\">output.setup</td>\n <td>on -dir Results/</td>\n <td>Main option: whether to output recommendation results<br>\n -dir path: the directory path of output results.\n </td>\n </tr>\n </table>\n</div>", "_____no_output_____" ], [ "### Configure the Shilling Model\n\n<div>\n <table class=\"table table-hover table-bordered\">\n\n <tr>\n <th width=\"12%\" scope=\"col\"> Entry</th>\n <th width=\"16%\" class=\"conf\" scope=\"col\">Example</th>\n <th width=\"72%\" class=\"conf\" scope=\"col\">Description</th>\n </tr>\n <tr>\n <td>ratings</td>\n <td>dataset/averageattack/ratings.txt</td>\n <td>Set the path to the recommendation dataset. Format: each row separated by empty, tab or comma symbol. 
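For example, with <code>-columns 0 1 2</code> a row such as <code>123 456 4.0</code> would be read as user 123 giving item 456 a rating of 4.0 (the ids here are purely illustrative). 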
</td>\n </tr>\n <tr>\n <td scope=\"row\">ratings.setup</td>\n <td>-columns 0 1 2</td>\n <td>-columns: (user, item, rating) columns of rating data are used;\n -header: to skip the first head line when reading data<br>\n </td>\n </tr>\n <tr>\n <td>attackSize</td>\n <td>0.01</td>\n <td>The ratio of the injected spammers to genuine users</td>\n </tr>\n <tr>\n <td>fillerSize</td>\n <td>0.01</td>\n <td>The ratio of the filler items to all items </td>\n </tr>\n <tr>\n <td>selectedSize</td>\n <td>0.001</td>\n <td>The ratio of the selected items to all items </td>\n </tr>\n <tr>\n <td>linkSize</td>\n <td>0.01</td>\n <td>The ratio of the users maliciously linked by a spammer to all user </td>\n </tr>\n <tr>\n <td>targetCount</td>\n <td>20</td>\n <td>The count of the targeted items </td>\n </tr>\n\n <tr>\n <td>targetScore</td>\n <td>5.0</td>\n <td>The score given to the target items</td>\n </tr>\n <tr>\n <td>threshold</td>\n <td>3.0</td>\n <td>Item has an average score lower than threshold may be chosen as one of the target items</td>\n </tr>\n\n <tr>\n <td>minCount</td>\n <td>3</td>\n <td>Item has a ratings count larger than minCount may be chosen as one of the target items</td>\n </tr>\n\n <tr>\n <td>maxCount</td>\n <td>50</td>\n <td>Item has a rating count smaller that maxCount may be chosen as one of the target items</td>\n </tr>\n\n <tr>\n <td scope=\"row\">outputDir</td>\n <td>data/</td>\n <td> User profiles and labels will be output here </td>\n </tr>\n </table>\n</div>", "_____no_output_____" ] ], [ [ "%%writefile BayesDetector.conf\nratings=dataset/amazon/profiles.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/amazon/labels.txt\nmethodName=BayesDetector\nevaluation.setup=-cv 5\nitem.ranking=off -topN 50\nnum.max.iter=100\nlearnRate=-init 0.03 -max 0.1\nreg.lambda=-u 0.3 -i 0.3\nBayesDetector=-k 10 -negCount 256 -gamma 1 -filter 4 -delta 0.01\noutput.setup=on -dir results/", "Writing BayesDetector.conf\n" ], [ "%%writefile CoDetector.conf\nratings=dataset/amazon/profiles.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/amazon/labels.txt\nmethodName=CoDetector\nevaluation.setup=-ap 0.3\nitem.ranking=on -topN 50\nnum.max.iter=200\nlearnRate=-init 0.01 -max 0.01\nreg.lambda=-u 0.8 -i 0.4\nCoDetector=-k 10 -negCount 256 -gamma 1 -filter 4\noutput.setup=on -dir results/amazon/", "Writing CoDetector.conf\n" ], [ "%%writefile DegreeSAD.conf\nratings=dataset/amazon/profiles.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/amazon/labels.txt\nmethodName=DegreeSAD\nevaluation.setup=-cv 5\noutput.setup=on -dir results/", "Overwriting DegreeSAD.conf\n" ], [ "%%writefile FAP.conf\nratings=dataset/averageattack/ratings.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/averageattack/labels.txt\nmethodName=FAP\nevaluation.setup=-ap 0.000001\nseedUser=350\ntopKSpam=1557\noutput.setup=on -dir results/", "Writing FAP.conf\n" ], [ "%%writefile PCASelectUsers.conf\nratings=dataset/averageattack/ratings.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/averageattack/labels.txt\nmethodName=PCASelectUsers\nevaluation.setup=-ap 0.00001\nkVals=3\nattackSize=0.1\noutput.setup=on -dir results/", "Writing PCASelectUsers.conf\n" ], [ "%%writefile SemiSAD.conf\nratings=dataset/averageattack/ratings.txt\nratings.setup=-columns 0 1 2\nlabel=dataset/averageattack/labels.txt\nmethodName=SemiSAD\nevaluation.setup=-ap 0.2\nLambda=0.5\ntopK=28\noutput.setup=on -dir results/", "Writing SemiSAD.conf\n" ] ], [ [ "## Baseclass", "_____no_output_____" ] ], [ [ "class SDetection(object):\n\n def 
__init__(self,conf,trainingSet=None,testSet=None,labels=None,fold='[1]'):\n self.config = conf\n self.isSave = False\n self.isLoad = False\n self.foldInfo = fold\n self.labels = labels\n self.dao = RatingDAO(self.config, trainingSet, testSet)\n self.training = []\n self.trainingLabels = []\n self.test = []\n self.testLabels = []\n\n def readConfiguration(self):\n self.algorName = self.config['methodName']\n self.output = LineConfig(self.config['output.setup'])\n\n\n def printAlgorConfig(self):\n \"show algorithm's configuration\"\n print('Algorithm:',self.config['methodName'])\n print('Ratings dataSet:',abspath(self.config['ratings']))\n if LineConfig(self.config['evaluation.setup']).contains('-testSet'):\n print('Test set:',abspath(LineConfig(self.config['evaluation.setup']).getOption('-testSet')))\n #print 'Count of the users in training set: ',len()\n print('Training set size: (user count: %d, item count %d, record count: %d)' %(self.dao.trainingSize()))\n print('Test set size: (user count: %d, item count %d, record count: %d)' %(self.dao.testSize()))\n print('='*80)\n\n def initModel(self):\n pass\n\n def buildModel(self):\n pass\n\n def saveModel(self):\n pass\n\n def loadModel(self):\n pass\n\n def predict(self):\n pass\n\n def execute(self):\n self.readConfiguration()\n if self.foldInfo == '[1]':\n self.printAlgorConfig()\n # load model from disk or build model\n if self.isLoad:\n print('Loading model %s...' % (self.foldInfo))\n self.loadModel()\n else:\n print('Initializing model %s...' % (self.foldInfo))\n self.initModel()\n print('Building Model %s...' % (self.foldInfo))\n self.buildModel()\n\n # preict the ratings or item ranking\n print('Predicting %s...' % (self.foldInfo))\n prediction = self.predict()\n report = classification_report(self.testLabels, prediction, digits=4)\n currentTime = currentTime = strftime(\"%Y-%m-%d %H-%M-%S\", localtime(time()))\n FileIO.writeFile(self.output['-dir'],self.algorName+'@'+currentTime+self.foldInfo,report)\n # save model\n if self.isSave:\n print('Saving model %s...' % (self.foldInfo))\n self.saveModel()\n print(report)\n return report", "_____no_output_____" ], [ "class SSDetection(SDetection):\n\n def __init__(self,conf,trainingSet=None,testSet=None,labels=None,relation=list(),fold='[1]'):\n super(SSDetection, self).__init__(conf,trainingSet,testSet,labels,fold)\n self.sao = SocialDAO(self.config, relation) # social relations access control", "_____no_output_____" ] ], [ [ "## Utils", "_____no_output_____" ] ], [ [ "class Config(object):\n def __init__(self,fileName):\n self.config = {}\n self.readConfiguration(fileName)\n\n def __getitem__(self, item):\n if not self.contains(item):\n print('parameter '+item+' is invalid!')\n exit(-1)\n return self.config[item]\n\n def getOptions(self,item):\n if not self.contains(item):\n print('parameter '+item+' is invalid!')\n exit(-1)\n return self.config[item]\n\n def contains(self,key):\n return key in self.config\n\n def readConfiguration(self,fileName):\n if not os.path.exists(abspath(fileName)):\n print('config file is not found!')\n raise IOError\n with open(fileName) as f:\n for ind,line in enumerate(f):\n if line.strip()!='':\n try:\n key,value=line.strip().split('=')\n self.config[key]=value\n except ValueError:\n print('config file is not in the correct format! 
Error Line:%d'%(ind))\n\n\nclass LineConfig(object):\n def __init__(self,content):\n self.line = content.strip().split(' ')\n self.options = {}\n self.mainOption = False\n if self.line[0] == 'on':\n self.mainOption = True\n elif self.line[0] == 'off':\n self.mainOption = False\n for i,item in enumerate(self.line):\n if (item.startswith('-') or item.startswith('--')) and not item[1:].isdigit():\n ind = i+1\n for j,sub in enumerate(self.line[ind:]):\n if (sub.startswith('-') or sub.startswith('--')) and not sub[1:].isdigit():\n ind = j\n break\n if j == len(self.line[ind:])-1:\n ind=j+1\n break\n try:\n self.options[item] = ' '.join(self.line[i+1:i+1+ind])\n except IndexError:\n self.options[item] = 1\n\n\n def __getitem__(self, item):\n if not self.contains(item):\n print('parameter '+item+' is invalid!')\n exit(-1)\n return self.options[item]\n\n def getOption(self,key):\n if not self.contains(key):\n print('parameter '+key+' is invalid!')\n exit(-1)\n return self.options[key]\n\n def isMainOn(self):\n return self.mainOption\n\n def contains(self,key):\n return key in self.options", "_____no_output_____" ], [ "class FileIO(object):\n def __init__(self):\n pass\n\n @staticmethod\n def writeFile(dir,file,content,op = 'w'):\n if not os.path.exists(dir):\n os.makedirs(dir)\n if type(content)=='str':\n with open(dir + file, op) as f:\n f.write(content)\n else:\n with open(dir+file,op) as f:\n f.writelines(content)\n\n @staticmethod\n def deleteFile(filePath):\n if os.path.exists(filePath):\n remove(filePath)\n\n @staticmethod\n def loadDataSet(conf, file, bTest=False):\n trainingData = defaultdict(dict)\n testData = defaultdict(dict)\n ratingConfig = LineConfig(conf['ratings.setup'])\n if not bTest:\n print('loading training data...')\n else:\n print('loading test data...')\n with open(file) as f:\n ratings = f.readlines()\n # ignore the headline\n if ratingConfig.contains('-header'):\n ratings = ratings[1:]\n # order of the columns\n order = ratingConfig['-columns'].strip().split()\n\n for lineNo, line in enumerate(ratings):\n items = split(' |,|\\t', line.strip())\n if not bTest and len(order) < 3:\n print('The rating file is not in a correct format. Error: Line num %d' % lineNo)\n exit(-1)\n try:\n userId = items[int(order[0])]\n itemId = items[int(order[1])]\n if bTest and len(order)<3:\n rating = 1 #default value\n else:\n rating = items[int(order[2])]\n\n except ValueError:\n print('Error! Have you added the option -header to the rating.setup?')\n exit(-1)\n if not bTest:\n trainingData[userId][itemId]=float(rating)\n else:\n testData[userId][itemId] = float(rating)\n if not bTest:\n return trainingData\n else:\n return testData\n\n @staticmethod\n def loadRelationship(conf, filePath):\n socialConfig = LineConfig(conf['social.setup'])\n relation = []\n print('loading social data...')\n with open(filePath) as f:\n relations = f.readlines()\n # ignore the headline\n if socialConfig.contains('-header'):\n relations = relations[1:]\n # order of the columns\n order = socialConfig['-columns'].strip().split()\n if len(order) <= 2:\n print('The social file is not in a correct format.')\n for lineNo, line in enumerate(relations):\n items = split(' |,|\\t', line.strip())\n if len(order) < 2:\n print('The social file is not in a correct format. 
Error: Line num %d' % lineNo)\n exit(-1)\n userId1 = items[int(order[0])]\n userId2 = items[int(order[1])]\n if len(order) < 3:\n weight = 1\n else:\n weight = float(items[int(order[2])])\n relation.append([userId1, userId2, weight])\n return relation\n\n\n @staticmethod\n def loadLabels(filePath):\n labels = {}\n with open(filePath) as f:\n for line in f:\n items = split(' |,|\\t', line.strip())\n labels[items[0]] = items[1]\n return labels", "_____no_output_____" ], [ "class DataSplit(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def dataSplit(data,test_ratio = 0.3,output=False,path='./',order=1):\n if test_ratio>=1 or test_ratio <=0:\n test_ratio = 0.3\n testSet = {}\n trainingSet = {}\n for user in data:\n if random.random() < test_ratio:\n testSet[user] = data[user].copy()\n else:\n trainingSet[user] = data[user].copy()\n\n if output:\n FileIO.writeFile(path,'testSet['+str(order)+']',testSet)\n FileIO.writeFile(path, 'trainingSet[' + str(order) + ']', trainingSet)\n return trainingSet,testSet\n\n @staticmethod\n def crossValidation(data,k,output=False,path='./',order=1):\n if k<=1 or k>10:\n k=3\n for i in range(k):\n trainingSet = {}\n testSet = {}\n for ind,user in enumerate(data):\n if ind%k == i:\n testSet[user] = data[user].copy()\n else:\n trainingSet[user] = data[user].copy()\n yield trainingSet,testSet", "_____no_output_____" ], [ "def drawLine(x,y,labels,xLabel,yLabel,title):\n f, ax = plt.subplots(1, 1, figsize=(10, 6), sharex=True)\n\n #f.tight_layout()\n #sns.set(style=\"darkgrid\")\n\n palette = ['blue','orange','red','green','purple','pink']\n # for i in range(len(ax)):\n # x1 = range(0, len(x))\n #ax.set_xlim(min(x1)-0.2,max(x1)+0.2)\n # mini = 10000;max = -10000\n # for label in labels:\n # if mini>min(y[i][label]):\n # mini = min(y[i][label])\n # if max<max(y[i][label]):\n # max = max(y[i][label])\n # ax[i].set_ylim(mini-0.25*(max-mini),max+0.25*(max-mini))\n # for j,label in enumerate(labels):\n # if j%2==1:\n # ax[i].plot(x1, y[i][label], color=palette[j/2], marker='.', label=label, markersize=12)\n # else:\n # ax[i].plot(x1, y[i][label], color=palette[j/2], marker='.', label=label,markersize=12,linestyle='--')\n # ax[0].set_ylabel(yLabel,fontsize=20)\n\n for xdata,ydata,lab,c in zip(x,y,labels,palette):\n ax.plot(xdata,ydata,color = c,label=lab)\n ind = np.arange(0,60,10)\n ax.set_xticks(ind)\n #ax.set_xticklabels(x)\n ax.set_xlabel(xLabel, fontsize=20)\n ax.set_ylabel(yLabel, fontsize=20)\n ax.tick_params(labelsize=16)\n #ax.tick_params(axs='y', labelsize=20)\n\n ax.set_title(title,fontsize=24)\n plt.grid(True)\n handles, labels1 = ax.get_legend_handles_labels()\n\n #ax[i].legend(handles, labels1, loc=2, fontsize=20)\n # ax.legend(loc=2,\n # ncol=6, borderaxespad=0.,fontsize=20)\n #ax[2].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize=20)\n ax.legend(loc='upper right',fontsize=20,shadow=True)\n plt.show()\n plt.close()\n\npaths = ['SVD.txt','PMF.txt','EE.txt','RDML.txt']\nfiles = ['EE['+str(i)+'] iteration.txt' for i in range(2,9)]\nx = []\ny = []\n\ndata = []\ndef normalize():\n for file in files:\n xdata = []\n with open(file) as f:\n for line in f:\n items = line.strip().split()\n rmse = items[2].split(':')[1]\n xdata.append(float(rmse))\n data.append(xdata)\n average = []\n for i in range(len(data[0])):\n total = 0\n for k in range(len(data)):\n total += data[k][i]\n average.append(str(i+1)+':'+str(float(total)/len(data))+'\\n')\n with open('EE.txt','w') as f:\n f.writelines(average)\n\n\n\ndef readData():\n for file in paths:\n 
xdata = []\n ydata = []\n with open(file) as f:\n for line in f:\n items = line.strip().split(':')\n xdata.append(int(items[0]))\n rmse = float(items[1])\n ydata.append(float(rmse))\n x.append(xdata)\n y.append(ydata)\n\n\n\n\n# x = [[1,2,3],[1,2,3]]\n# y = [[1,2,3],[4,5,6]]\n#normalize()\nreadData()\nlabels = ['SVD','PMF','EE','RDML',]\nxlabel = 'Iteration'\nylabel = 'RMSE'\n\ndrawLine(x,y,labels,xlabel,ylabel,'')", "_____no_output_____" ], [ "def l1(x):\n return norm(x,ord=1)\n\ndef l2(x):\n return norm(x)\n\ndef common(x1,x2):\n # find common ratings\n common = (x1!=0)&(x2!=0)\n new_x1 = x1[common]\n new_x2 = x2[common]\n return new_x1,new_x2\n\ndef cosine_sp(x1,x2):\n 'x1,x2 are dicts,this version is for sparse representation'\n total = 0\n denom1 = 0\n denom2 =0\n for k in x1:\n if k in x2:\n total+=x1[k]*x2[k]\n denom1+=x1[k]**2\n denom2+=x2[k]**2\n try:\n return (total + 0.0) / (sqrt(denom1) * sqrt(denom2))\n except ZeroDivisionError:\n return 0\n\n\ndef cosine(x1,x2):\n #find common ratings\n new_x1, new_x2 = common(x1,x2)\n #compute the cosine similarity between two vectors\n sum = new_x1.dot(new_x2)\n denom = sqrt(new_x1.dot(new_x1)*new_x2.dot(new_x2))\n try:\n return float(sum)/denom\n except ZeroDivisionError:\n return 0\n\n #return cosine_similarity(x1,x2)[0][0]\n\ndef pearson_sp(x1,x2):\n total = 0\n denom1 = 0\n denom2 = 0\n overlapped=False\n try:\n mean1 = sum(x1.values())/(len(x1)+0.0)\n mean2 = sum(x2.values()) / (len(x2) + 0.0)\n for k in x1:\n if k in x2:\n total += (x1[k]-mean1) * (x2[k]-mean2)\n denom1 += (x1[k]-mean1) ** 2\n denom2 += (x2[k]-mean2) ** 2\n overlapped=True\n\n return (total + 0.0) / (sqrt(denom1) * sqrt(denom2))\n except ZeroDivisionError:\n if overlapped:\n return 1\n else:\n return 0\n\ndef euclidean(x1,x2):\n #find common ratings\n new_x1, new_x2 = common(x1, x2)\n #compute the euclidean between two vectors\n diff = new_x1-new_x2\n denom = sqrt((diff.dot(diff)))\n try:\n return 1/denom\n except ZeroDivisionError:\n return 0\n\n\ndef pearson(x1,x2):\n #find common ratings\n new_x1, new_x2 = common(x1, x2)\n #compute the pearson similarity between two vectors\n ind1 = new_x1 > 0\n ind2 = new_x2 > 0\n try:\n mean_x1 = float(new_x1.sum())/ind1.sum()\n mean_x2 = float(new_x2.sum())/ind2.sum()\n new_x1 = new_x1 - mean_x1\n new_x2 = new_x2 - mean_x2\n sum = new_x1.dot(new_x2)\n denom = sqrt((new_x1.dot(new_x1))*(new_x2.dot(new_x2)))\n return float(sum) / denom\n except ZeroDivisionError:\n return 0\n\n\ndef similarity(x1,x2,sim):\n if sim == 'pcc':\n return pearson_sp(x1,x2)\n if sim == 'euclidean':\n return euclidean(x1,x2)\n else:\n return cosine_sp(x1, x2)\n\n\ndef normalize(vec,maxVal,minVal):\n 'get the normalized value using min-max normalization'\n if maxVal > minVal:\n return float(vec-minVal)/(maxVal-minVal)+0.01\n elif maxVal==minVal:\n return vec/maxVal\n else:\n print('error... 
maximum value is less than minimum value.')\n raise ArithmeticError\n\ndef sigmoid(val):\n return 1/(1+exp(-val))\n\n\ndef denormalize(vec,maxVal,minVal):\n return minVal+(vec-0.01)*(maxVal-minVal)", "_____no_output_____" ] ], [ [ "## Shilling models", "_____no_output_____" ], [ "### Attack base class", "_____no_output_____" ] ], [ [ "class Attack(object):\n def __init__(self,conf):\n self.config = Config(conf)\n self.userProfile = FileIO.loadDataSet(self.config,self.config['ratings'])\n self.itemProfile = defaultdict(dict)\n self.attackSize = float(self.config['attackSize'])\n self.fillerSize = float(self.config['fillerSize'])\n self.selectedSize = float(self.config['selectedSize'])\n self.targetCount = int(self.config['targetCount'])\n self.targetScore = float(self.config['targetScore'])\n self.threshold = float(self.config['threshold'])\n self.minCount = int(self.config['minCount'])\n self.maxCount = int(self.config['maxCount'])\n self.minScore = float(self.config['minScore'])\n self.maxScore = float(self.config['maxScore'])\n self.outputDir = self.config['outputDir']\n if not os.path.exists(self.outputDir):\n os.makedirs(self.outputDir)\n for user in self.userProfile:\n for item in self.userProfile[user]:\n self.itemProfile[item][user] = self.userProfile[user][item]\n self.spamProfile = defaultdict(dict)\n self.spamItem = defaultdict(list) #items rated by spammers\n self.targetItems = []\n self.itemAverage = {}\n self.getAverageRating()\n self.selectTarget()\n self.startUserID = 0\n\n def getAverageRating(self):\n for itemID in self.itemProfile:\n li = list(self.itemProfile[itemID].values())\n self.itemAverage[itemID] = float(sum(li)) / len(li)\n\n\n def selectTarget(self,):\n print('Selecting target items...')\n print('-'*80)\n print('Target item Average rating of the item')\n itemList = list(self.itemProfile.keys())\n itemList.sort()\n while len(self.targetItems) < self.targetCount:\n target = np.random.randint(len(itemList)) #generate a target order at random\n\n if len(self.itemProfile[str(itemList[target])]) < self.maxCount and len(self.itemProfile[str(itemList[target])]) > self.minCount \\\n and str(itemList[target]) not in self.targetItems \\\n and self.itemAverage[str(itemList[target])] <= self.threshold:\n self.targetItems.append(str(itemList[target]))\n print(str(itemList[target]),' ',self.itemAverage[str(itemList[target])])\n\n def getFillerItems(self):\n mu = int(self.fillerSize*len(self.itemProfile))\n sigma = int(0.1*mu)\n markedItemsCount = abs(int(round(random.gauss(mu, sigma))))\n markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)\n return markedItems.tolist()\n\n def insertSpam(self,startID=0):\n pass\n\n def loadTarget(self,filename):\n with open(filename) as f:\n for line in f:\n self.targetItems.append(line.strip())\n\n def generateLabels(self,filename):\n labels = []\n path = self.outputDir + filename\n with open(path,'w') as f:\n for user in self.spamProfile:\n labels.append(user+' 1\\n')\n for user in self.userProfile:\n labels.append(user+' 0\\n')\n f.writelines(labels)\n print('User profiles have been output to '+abspath(self.config['outputDir'])+'.')\n\n def generateProfiles(self,filename):\n ratings = []\n path = self.outputDir+filename\n with open(path, 'w') as f:\n for user in self.userProfile:\n for item in self.userProfile[user]:\n ratings.append(user+' '+item+' '+str(self.userProfile[user][item])+'\\n')\n\n for user in self.spamProfile:\n for item in self.spamProfile[user]:\n ratings.append(user + ' ' + item + ' ' + 
str(self.spamProfile[user][item])+'\\n')\n f.writelines(ratings)\n print('User labels have been output to '+abspath(self.config['outputDir'])+'.')", "_____no_output_____" ] ], [ [ "### Relation attack", "_____no_output_____" ] ], [ [ "class RelationAttack(Attack):\n def __init__(self,conf):\n super(RelationAttack, self).__init__(conf)\n self.spamLink = defaultdict(list)\n self.relation = FileIO.loadRelationship(self.config,self.config['social'])\n self.trustLink = defaultdict(list)\n self.trusteeLink = defaultdict(list)\n for u1,u2,t in self.relation:\n self.trustLink[u1].append(u2)\n self.trusteeLink[u2].append(u1)\n self.activeUser = {} # 关注了虚假用户的正常用户\n self.linkedUser = {} # 被虚假用户种植过链接的用户\n\n # def reload(self):\n # super(RelationAttack, self).reload()\n # self.spamLink = defaultdict(list)\n # self.trustLink, self.trusteeLink = loadTrusts(self.config['social'])\n # self.activeUser = {} # 关注了虚假用户的正常用户\n # self.linkedUser = {} # 被虚假用户种植过链接的用户\n\n def farmLink(self):\n pass\n\n def getReciprocal(self,target):\n #当前目标用户关注spammer的概率,依赖于粉丝数和关注数的交集\n reciprocal = float(2 * len(set(self.trustLink[target]).intersection(self.trusteeLink[target])) + 0.1) \\\n / (len(set(self.trustLink[target]).union(self.trusteeLink[target])) + 1)\n reciprocal += (len(self.trustLink[target]) + 0.1) / (len(self.trustLink[target]) + len(self.trusteeLink[target]) + 1)\n reciprocal /= 2\n return reciprocal\n\n def generateSocialConnections(self,filename):\n relations = []\n path = self.outputDir + filename\n with open(path, 'w') as f:\n for u1 in self.trustLink:\n for u2 in self.trustLink[u1]:\n relations.append(u1 + ' ' + u2 + ' 1\\n')\n\n for u1 in self.spamLink:\n for u2 in self.spamLink[u1]:\n relations.append(u1 + ' ' + u2 + ' 1\\n')\n f.writelines(relations)\n print('Social relations have been output to ' + abspath(self.config['outputDir']) + '.')", "_____no_output_____" ] ], [ [ "### Random relation attack", "_____no_output_____" ] ], [ [ "class RandomRelationAttack(RelationAttack):\n def __init__(self,conf):\n super(RandomRelationAttack, self).__init__(conf)\n self.scale = float(self.config['linkSize'])\n\n def farmLink(self): # 随机注入虚假关系\n\n for spam in self.spamProfile:\n\n #对购买了目标项目的用户种植链接\n for item in self.spamItem[spam]:\n if random.random() < 0.01:\n for target in self.itemProfile[item]:\n self.spamLink[spam].append(target)\n response = np.random.random()\n reciprocal = self.getReciprocal(target)\n if response <= reciprocal:\n self.trustLink[target].append(spam)\n self.activeUser[target] = 1\n else:\n self.linkedUser[target] = 1\n #对其它用户以scale的比例种植链接\n for user in self.userProfile:\n if random.random() < self.scale:\n self.spamLink[spam].append(user)\n response = np.random.random()\n reciprocal = self.getReciprocal(user)\n if response < reciprocal:\n self.trustLink[user].append(spam)\n self.activeUser[user] = 1\n else:\n self.linkedUser[user] = 1", "_____no_output_____" ] ], [ [ "### Random attack", "_____no_output_____" ] ], [ [ "class RandomAttack(Attack):\n def __init__(self,conf):\n super(RandomAttack, self).__init__(conf)\n\n\n def insertSpam(self,startID=0):\n print('Modeling random attack...')\n itemList = list(self.itemProfile.keys())\n if startID == 0:\n self.startUserID = len(self.userProfile)\n else:\n self.startUserID = startID\n\n for i in range(int(len(self.userProfile)*self.attackSize)):\n #fill 装填项目\n fillerItems = self.getFillerItems()\n for item in fillerItems:\n self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)\n\n #target 目标项目\n 
for j in range(self.targetCount):\n target = np.random.randint(len(self.targetItems))\n self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore\n self.spamItem[str(self.startUserID)].append(self.targetItems[target])\n self.startUserID += 1", "_____no_output_____" ], [ "class RR_Attack(RandomRelationAttack,RandomAttack):\n def __init__(self,conf):\n super(RR_Attack, self).__init__(conf)", "_____no_output_____" ] ], [ [ "### Average attack", "_____no_output_____" ] ], [ [ "class AverageAttack(Attack):\n def __init__(self,conf):\n super(AverageAttack, self).__init__(conf)\n\n def insertSpam(self,startID=0):\n print('Modeling average attack...')\n itemList = list(self.itemProfile.keys())\n if startID == 0:\n self.startUserID = len(self.userProfile)\n else:\n self.startUserID = startID\n\n for i in range(int(len(self.userProfile)*self.attackSize)):\n #fill\n fillerItems = self.getFillerItems()\n for item in fillerItems:\n self.spamProfile[str(self.startUserID)][str(itemList[item])] = round(self.itemAverage[str(itemList[item])])\n #target\n for j in range(self.targetCount):\n target = np.random.randint(len(self.targetItems))\n self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore\n self.spamItem[str(self.startUserID)].append(self.targetItems[target])\n self.startUserID += 1", "_____no_output_____" ] ], [ [ "### Random average relation", "_____no_output_____" ] ], [ [ "class RA_Attack(RandomRelationAttack,AverageAttack):\n def __init__(self,conf):\n super(RA_Attack, self).__init__(conf)", "_____no_output_____" ] ], [ [ "### Bandwagon attack", "_____no_output_____" ] ], [ [ "class BandWagonAttack(Attack):\n def __init__(self,conf):\n super(BandWagonAttack, self).__init__(conf)\n self.hotItems = sorted(iter(self.itemProfile.items()), key=lambda d: len(d[1]), reverse=True)[\n :int(self.selectedSize * len(self.itemProfile))]\n\n\n def insertSpam(self,startID=0):\n print('Modeling bandwagon attack...')\n itemList = list(self.itemProfile.keys())\n if startID == 0:\n self.startUserID = len(self.userProfile)\n else:\n self.startUserID = startID\n\n for i in range(int(len(self.userProfile)*self.attackSize)):\n #fill 装填项目\n fillerItems = self.getFillerItems()\n for item in fillerItems:\n self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)\n #selected 选择项目\n selectedItems = self.getSelectedItems()\n for item in selectedItems:\n self.spamProfile[str(self.startUserID)][item] = self.targetScore\n #target 目标项目\n for j in range(self.targetCount):\n target = np.random.randint(len(self.targetItems))\n self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore\n self.spamItem[str(self.startUserID)].append(self.targetItems[target])\n self.startUserID += 1\n\n def getFillerItems(self):\n mu = int(self.fillerSize*len(self.itemProfile))\n sigma = int(0.1*mu)\n markedItemsCount = int(round(random.gauss(mu, sigma)))\n if markedItemsCount < 0:\n markedItemsCount = 0\n markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)\n return markedItems\n\n def getSelectedItems(self):\n\n mu = int(self.selectedSize * len(self.itemProfile))\n sigma = int(0.1 * mu)\n markedItemsCount = abs(int(round(random.gauss(mu, sigma))))\n markedIndexes = np.random.randint(len(self.hotItems), size=markedItemsCount)\n markedItems = [self.hotItems[index][0] for index in markedIndexes]\n return markedItems", "_____no_output_____" ] ], [ [ "### Random bandwagon relation", 
"_____no_output_____" ] ], [ [ "class RB_Attack(RandomRelationAttack,BandWagonAttack):\n def __init__(self,conf):\n super(RB_Attack, self).__init__(conf)", "_____no_output_____" ] ], [ [ "### Hybrid attack", "_____no_output_____" ] ], [ [ "class HybridAttack(Attack):\n def __init__(self,conf):\n super(HybridAttack, self).__init__(conf)\n self.aveAttack = AverageAttack(conf)\n self.bandAttack = BandWagonAttack(conf)\n self.randAttack = RandomAttack(conf)\n\n\n def insertSpam(self,startID=0):\n self.aveAttack.insertSpam()\n self.bandAttack.insertSpam(self.aveAttack.startUserID+1)\n self.randAttack.insertSpam(self.bandAttack.startUserID+1)\n self.spamProfile = {}\n self.spamProfile.update(self.aveAttack.spamProfile)\n self.spamProfile.update(self.bandAttack.spamProfile)\n self.spamProfile.update(self.randAttack.spamProfile)\n\n def generateProfiles(self,filename):\n\n ratings = []\n path = self.outputDir + filename\n with open(path, 'w') as f:\n for user in self.userProfile:\n for item in self.userProfile[user]:\n ratings.append(user + ' ' + item + ' ' + str(self.userProfile[user][item]) + '\\n')\n\n for user in self.spamProfile:\n for item in self.spamProfile[user]:\n ratings.append(user + ' ' + item + ' ' + str(self.spamProfile[user][item]) + '\\n')\n f.writelines(ratings)\n print('User labels have been output to ' + abspath(self.config['outputDir']) + '.')\n\n def generateLabels(self,filename):\n labels = []\n path = self.outputDir + filename\n with open(path,'w') as f:\n for user in self.spamProfile:\n labels.append(user+' 1\\n')\n for user in self.userProfile:\n labels.append(user+' 0\\n')\n f.writelines(labels)\n print('User profiles have been output to '+abspath(self.config['outputDir'])+'.')", "_____no_output_____" ] ], [ [ "### Generate data", "_____no_output_____" ] ], [ [ "%%writefile config.conf\nratings=dataset/filmtrust/ratings.txt\nratings.setup=-columns 0 1 2\nsocial=dataset/filmtrust/trust.txt\nsocial.setup=-columns 0 1 2\nattackSize=0.1\nfillerSize=0.05\nselectedSize=0.005\ntargetCount=20\ntargetScore=4.0\nthreshold=3.0\nmaxScore=4.0\nminScore=1.0\nminCount=5\nmaxCount=50\nlinkSize=0.001\noutputDir=output/", "Overwriting config.conf\n" ], [ "attack = RR_Attack('config.conf')\nattack.insertSpam()\nattack.farmLink()\nattack.generateLabels('labels.txt')\nattack.generateProfiles('profiles.txt')\nattack.generateSocialConnections('relations.txt')", "loading training data...\nSelecting target items...\n--------------------------------------------------------------------------------\nTarget item Average rating of the item\n877 2.875\n472 2.5833333333333335\n715 2.8\n528 2.7142857142857144\n169 2.25\n442 2.8055555555555554\n270 2.962962962962963\n681 2.75\n843 3.0\n832 1.8571428571428572\n668 2.7777777777777777\n938 2.9166666666666665\n282 2.642857142857143\n489 2.1666666666666665\n927 2.5833333333333335\n577 2.5\n693 2.6875\n593 2.7083333333333335\n529 2.5\n872 2.3333333333333335\nloading social data...\nModeling random attack...\nUser profiles have been output to /content/output.\nUser labels have been output to /content/output.\nSocial relations have been output to /content/output.\n" ] ], [ [ "## Data access objects", "_____no_output_____" ] ], [ [ "class RatingDAO(object):\n 'data access control'\n def __init__(self,config, trainingData, testData):\n self.config = config\n self.ratingConfig = LineConfig(config['ratings.setup'])\n self.user = {} #used to store the order of users in the training set\n self.item = {} #used to store the order of items in the training set\n 
self.id2user = {}\n self.id2item = {}\n self.all_Item = {}\n self.all_User = {}\n self.userMeans = {} #used to store the mean values of users's ratings\n self.itemMeans = {} #used to store the mean values of items's ratings\n\n\n self.globalMean = 0\n self.timestamp = {}\n # self.trainingMatrix = None\n # self.validationMatrix = None\n self.testSet_u = testData.copy() # used to store the test set by hierarchy user:[item,rating]\n self.testSet_i = defaultdict(dict) # used to store the test set by hierarchy item:[user,rating]\n self.trainingSet_u = trainingData.copy()\n self.trainingSet_i = defaultdict(dict)\n #self.rScale = []\n\n self.trainingData = trainingData\n self.testData = testData\n self.__generateSet()\n self.__computeItemMean()\n self.__computeUserMean()\n self.__globalAverage()\n\n\n\n def __generateSet(self):\n scale = set()\n # find the maximum rating and minimum value\n # for i, entry in enumerate(self.trainingData):\n # userName, itemName, rating = entry\n # scale.add(float(rating))\n # self.rScale = list(scale)\n # self.rScale.sort()\n\n for i,user in enumerate(self.trainingData):\n for item in self.trainingData[user]:\n\n # makes the rating within the range [0, 1].\n #rating = normalize(float(rating), self.rScale[-1], self.rScale[0])\n #self.trainingSet_u[userName][itemName] = float(rating)\n self.trainingSet_i[item][user] = self.trainingData[user][item]\n # order the user\n if user not in self.user:\n self.user[user] = len(self.user)\n self.id2user[self.user[user]] = user\n # order the item\n if item not in self.item:\n self.item[item] = len(self.item)\n self.id2item[self.item[item]] = item\n self.trainingSet_i[item][user] = self.trainingData[user][item]\n # userList.append\n # triple.append([self.user[userName], self.item[itemName], rating])\n # self.trainingMatrix = new_sparseMatrix.SparseMatrix(triple)\n\n self.all_User.update(self.user)\n self.all_Item.update(self.item)\n\n for i, user in enumerate(self.testData):\n # order the user\n if user not in self.user:\n self.all_User[user] = len(self.all_User)\n for item in self.testData[user]:\n # order the item\n if item not in self.item:\n self.all_Item[item] = len(self.all_Item)\n #self.testSet_u[userName][itemName] = float(rating)\n self.testSet_i[item][user] = self.testData[user][item]\n\n\n def __globalAverage(self):\n total = sum(self.userMeans.values())\n if total==0:\n self.globalMean = 0\n else:\n self.globalMean = total/len(self.userMeans)\n\n def __computeUserMean(self):\n # for u in self.user:\n # n = self.row(u) > 0\n # mean = 0\n #\n # if not self.containsUser(u): # no data about current user in training set\n # pass\n # else:\n # sum = float(self.row(u)[0].sum())\n # try:\n # mean = sum/ n[0].sum()\n # except ZeroDivisionError:\n # mean = 0\n # self.userMeans[u] = mean\n for u in self.trainingSet_u:\n self.userMeans[u] = sum(self.trainingSet_u[u].values())/(len(list(self.trainingSet_u[u].values()))+0.0)\n for u in self.testSet_u:\n self.userMeans[u] = sum(self.testSet_u[u].values())/(len(list(self.testSet_u[u].values()))+0.0)\n\n def __computeItemMean(self):\n # for c in self.item:\n # n = self.col(c) > 0\n # mean = 0\n # if not self.containsItem(c): # no data about current user in training set\n # pass\n # else:\n # sum = float(self.col(c)[0].sum())\n # try:\n # mean = sum / n[0].sum()\n # except ZeroDivisionError:\n # mean = 0\n # self.itemMeans[c] = mean\n for item in self.trainingSet_i:\n self.itemMeans[item] = sum(self.trainingSet_i[item].values())/(len(list(self.trainingSet_i[item].values())) + 0.0)\n 
for item in self.testSet_i:\n self.itemMeans[item] = sum(self.testSet_i[item].values())/(len(list(self.testSet_i[item].values())) + 0.0)\n\n def getUserId(self,u):\n if u in self.user:\n return self.user[u]\n else:\n return -1\n\n def getItemId(self,i):\n if i in self.item:\n return self.item[i]\n else:\n return -1\n\n def trainingSize(self):\n recordCount = 0\n for user in self.trainingData:\n recordCount+=len(self.trainingData[user])\n return (len(self.trainingSet_u),len(self.trainingSet_i),recordCount)\n\n\n def testSize(self):\n recordCount = 0\n for user in self.testData:\n recordCount += len(self.testData[user])\n return (len(self.testSet_u),len(self.testSet_i),recordCount)\n\n def contains(self,u,i):\n 'whether user u rated item i'\n if u in self.trainingSet_u and i in self.trainingSet_u[u]:\n return True\n return False\n\n def containsUser(self,u):\n 'whether user is in training set'\n return u in self.trainingSet_u\n\n def containsItem(self,i):\n 'whether item is in training set'\n return i in self.trainingSet_i\n\n def allUserRated(self, u):\n if u in self.user:\n return list(self.trainingSet_u[u].keys()), list(self.trainingSet_u[u].values())\n else:\n return list(self.testSet_u[u].keys()), list(self.testSet_u[u].values())\n # def userRated(self,u):\n # if self.trainingMatrix.matrix_User.has_key(self.getUserId(u)):\n # itemIndex = self.trainingMatrix.matrix_User[self.user[u]].keys()\n # rating = self.trainingMatrix.matrix_User[self.user[u]].values()\n # return (itemIndex,rating)\n # return ([],[])\n #\n # def itemRated(self,i):\n # if self.trainingMatrix.matrix_Item.has_key(self.getItemId(i)):\n # userIndex = self.trainingMatrix.matrix_Item[self.item[i]].keys()\n # rating = self.trainingMatrix.matrix_Item[self.item[i]].values()\n # return (userIndex,rating)\n # return ([],[])\n\n # def row(self,u):\n # return self.trainingMatrix.row(self.getUserId(u))\n #\n # def col(self,c):\n # return self.trainingMatrix.col(self.getItemId(c))\n #\n # def sRow(self,u):\n # return self.trainingMatrix.sRow(self.getUserId(u))\n #\n # def sCol(self,c):\n # return self.trainingMatrix.sCol(self.getItemId(c))\n #\n # def rating(self,u,c):\n # return self.trainingMatrix.elem(self.getUserId(u),self.getItemId(c))\n #\n # def ratingScale(self):\n # return (self.rScale[0],self.rScale[1])\n\n # def elemCount(self):\n # return self.trainingMatrix.elemCount()", "_____no_output_____" ], [ "class SocialDAO(object):\n def __init__(self,conf,relation=list()):\n self.config = conf\n self.user = {} #used to store the order of users\n self.relation = relation\n self.followees = {}\n self.followers = {}\n self.trustMatrix = self.__generateSet()\n\n def __generateSet(self):\n #triple = []\n for line in self.relation:\n userId1,userId2,weight = line\n #add relations to dict\n if userId1 not in self.followees:\n self.followees[userId1] = {}\n self.followees[userId1][userId2] = weight\n if userId2 not in self.followers:\n self.followers[userId2] = {}\n self.followers[userId2][userId1] = weight\n # order the user\n if userId1 not in self.user:\n self.user[userId1] = len(self.user)\n if userId2 not in self.user:\n self.user[userId2] = len(self.user)\n #triple.append([self.user[userId1], self.user[userId2], weight])\n #return new_sparseMatrix.SparseMatrix(triple)\n\n # def row(self,u):\n # #return user u's followees\n # return self.trustMatrix.row(self.user[u])\n #\n # def col(self,u):\n # #return user u's followers\n # return self.trustMatrix.col(self.user[u])\n #\n # def elem(self,u1,u2):\n # return 
self.trustMatrix.elem(u1,u2)\n\n def weight(self,u1,u2):\n if u1 in self.followees and u2 in self.followees[u1]:\n return self.followees[u1][u2]\n else:\n return 0\n\n # def trustSize(self):\n # return self.trustMatrix.size\n\n def getFollowers(self,u):\n if u in self.followers:\n return self.followers[u]\n else:\n return {}\n\n def getFollowees(self,u):\n if u in self.followees:\n return self.followees[u]\n else:\n return {}\n\n def hasFollowee(self,u1,u2):\n if u1 in self.followees:\n if u2 in self.followees[u1]:\n return True\n else:\n return False\n return False\n\n def hasFollower(self,u1,u2):\n if u1 in self.followers:\n if u2 in self.followers[u1]:\n return True\n else:\n return False\n return False", "_____no_output_____" ] ], [ [ "## Methods", "_____no_output_____" ], [ "### BayesDetector", "_____no_output_____" ] ], [ [ "#BayesDetector: Collaborative Shilling Detection Bridging Factorization and User Embedding\nclass BayesDetector(SDetection):\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):\n super(BayesDetector, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n def readConfiguration(self):\n super(BayesDetector, self).readConfiguration()\n extraSettings = LineConfig(self.config['BayesDetector'])\n self.k = int(extraSettings['-k'])\n self.negCount = int(extraSettings['-negCount']) # the number of negative samples\n if self.negCount < 1:\n self.negCount = 1\n\n self.regR = float(extraSettings['-gamma'])\n self.filter = int(extraSettings['-filter'])\n self.delta = float(extraSettings['-delta'])\n learningRate = LineConfig(self.config['learnRate'])\n self.lRate = float(learningRate['-init'])\n self.maxLRate = float(learningRate['-max'])\n self.maxIter = int(self.config['num.max.iter'])\n regular = LineConfig(self.config['reg.lambda'])\n self.regU, self.regI = float(regular['-u']), float(regular['-i'])\n # self.delta = float(self.config['delta'])\n def printAlgorConfig(self):\n super(BayesDetector, self).printAlgorConfig()\n print('k: %d' % self.negCount)\n print('regR: %.5f' % self.regR)\n print('filter: %d' % self.filter)\n print('=' * 80)\n\n def initModel(self):\n super(BayesDetector, self).initModel()\n # self.c = np.random.rand(len(self.dao.all_User) + 1) / 20 # bias value of context\n self.G = np.random.rand(len(self.dao.all_User)+1, self.k) / 100 # context embedding\n self.P = np.random.rand(len(self.dao.all_User)+1, self.k) / 100 # latent user matrix\n self.Q = np.random.rand(len(self.dao.all_Item)+1, self.k) / 100 # latent item matrix\n\n # constructing SPPMI matrix\n self.SPPMI = defaultdict(dict)\n D = len(self.dao.user)\n print('Constructing SPPMI matrix...')\n # for larger data set has many items, the process will be time consuming\n occurrence = defaultdict(dict)\n for user1 in self.dao.all_User:\n iList1, rList1 = self.dao.allUserRated(user1)\n if len(iList1) < self.filter:\n continue\n for user2 in self.dao.all_User:\n if user1 == user2:\n continue\n if user2 not in occurrence[user1]:\n iList2, rList2 = self.dao.allUserRated(user2)\n if len(iList2) < self.filter:\n continue\n count = len(set(iList1).intersection(set(iList2)))\n if count > self.filter:\n occurrence[user1][user2] = count\n occurrence[user2][user1] = count\n\n maxVal = 0\n frequency = {}\n for user1 in occurrence:\n frequency[user1] = sum(occurrence[user1].values()) * 1.0\n D = sum(frequency.values()) * 1.0\n # maxx = -1\n for user1 in occurrence:\n for user2 in occurrence[user1]:\n try:\n val = max([log(occurrence[user1][user2] * D / (frequency[user1] * 
frequency[user2]), 2) - log(\n self.negCount, 2), 0])\n except ValueError:\n print(self.SPPMI[user1][user2])\n print(self.SPPMI[user1][user2] * D / (frequency[user1] * frequency[user2]))\n if val > 0:\n if maxVal < val:\n maxVal = val\n self.SPPMI[user1][user2] = val\n self.SPPMI[user2][user1] = self.SPPMI[user1][user2]\n\n # normalize\n for user1 in self.SPPMI:\n for user2 in self.SPPMI[user1]:\n self.SPPMI[user1][user2] = self.SPPMI[user1][user2] / maxVal\n\n def buildModel(self):\n self.dao.ratings = dict(self.dao.trainingSet_u, **self.dao.testSet_u)\n #suspicous set\n print('Preparing sets...')\n self.sSet = defaultdict(dict)\n #normal set\n self.nSet = defaultdict(dict)\n # self.NegativeSet = defaultdict(list)\n\n for user in self.dao.user:\n for item in self.dao.ratings[user]:\n # if self.dao.ratings[user][item] >= 5 and self.labels[user]=='1':\n if self.labels[user] =='1':\n self.sSet[item][user] = 1\n # if self.dao.ratings[user][item] >= 5 and self.labels[user] == '0':\n if self.labels[user] == '0':\n self.nSet[item][user] = 1\n # Jointly decompose R(ratings) and SPPMI with shared user latent factors P\n iteration = 0\n while iteration < self.maxIter:\n self.loss = 0\n\n for item in self.sSet:\n i = self.dao.all_Item[item]\n if item not in self.nSet:\n continue\n normalUserList = list(self.nSet[item].keys())\n for user in self.sSet[item]:\n su = self.dao.all_User[user]\n # if len(self.NegativeSet[user]) > 0:\n # item_j = choice(self.NegativeSet[user])\n # else:\n normalUser = choice(normalUserList)\n nu = self.dao.all_User[normalUser]\n\n s = sigmoid(self.P[su].dot(self.Q[i]) - self.P[nu].dot(self.Q[i]))\n self.Q[i] += (self.lRate * (1 - s) * (self.P[su] - self.P[nu]))\n self.P[su] += (self.lRate * (1 - s) * self.Q[i])\n self.P[nu] -= (self.lRate * (1 - s) * self.Q[i])\n\n self.Q[i] -= self.lRate * self.regI * self.Q[i]\n self.P[su] -= self.lRate * self.regU * self.P[su]\n self.P[nu] -= self.lRate * self.regU * self.P[nu]\n\n self.loss += (-log(s))\n #\n # for item in self.sSet:\n # if not self.nSet.has_key(item):\n # continue\n # for user1 in self.sSet[item]:\n # for user2 in self.sSet[item]:\n # su1 = self.dao.all_User[user1]\n # su2 = self.dao.all_User[user2]\n # self.P[su1] += (self.lRate*(self.P[su1]-self.P[su2]))*self.delta\n # self.P[su2] -= (self.lRate*(self.P[su1]-self.P[su2]))*self.delta\n #\n # self.loss += ((self.P[su1]-self.P[su2]).dot(self.P[su1]-self.P[su2]))*self.delta\n\n\n for user in self.dao.ratings:\n for item in self.dao.ratings[user]:\n rating = self.dao.ratings[user][item]\n if rating < 5:\n continue\n error = rating - self.predictRating(user,item)\n u = self.dao.all_User[user]\n i = self.dao.all_Item[item]\n p = self.P[u]\n q = self.Q[i]\n # self.loss += (error ** 2)*self.b\n # update latent vectors\n self.P[u] += (self.lRate * (error * q - self.regU * p))\n self.Q[i] += (self.lRate * (error * p - self.regI * q))\n\n\n for user in self.SPPMI:\n u = self.dao.all_User[user]\n p = self.P[u]\n for context in self.SPPMI[user]:\n v = self.dao.all_User[context]\n m = self.SPPMI[user][context]\n g = self.G[v]\n diff = (m - p.dot(g))\n self.loss += (diff ** 2)\n # update latent vectors\n self.P[u] += (self.lRate * diff * g)\n self.G[v] += (self.lRate * diff * p)\n self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum() + self.regR * (self.G * self.G).sum()\n iteration += 1\n print('iteration:',iteration)\n\n # preparing examples\n self.training = []\n self.trainingLabels = []\n self.test = []\n self.testLabels = []\n\n for user in 
self.dao.trainingSet_u:\n self.training.append(self.P[self.dao.all_User[user]])\n self.trainingLabels.append(self.labels[user])\n for user in self.dao.testSet_u:\n self.test.append(self.P[self.dao.all_User[user]])\n self.testLabels.append(self.labels[user])\n #\n # tsne = TSNE(n_components=2)\n # self.Y = tsne.fit_transform(self.P)\n #\n # self.normalUsers = []\n # self.spammers = []\n # for user in self.labels:\n # if self.labels[user] == '0':\n # self.normalUsers.append(user)\n # else:\n # self.spammers.append(user)\n #\n #\n # print len(self.spammers)\n # self.normalfeature = np.zeros((len(self.normalUsers), 2))\n # self.spamfeature = np.zeros((len(self.spammers), 2))\n # normal_index = 0\n # for normaluser in self.normalUsers:\n # if normaluser in self.dao.all_User:\n # self.normalfeature[normal_index] = self.Y[self.dao.all_User[normaluser]]\n # normal_index += 1\n #\n # spam_index = 0\n # for spamuser in self.spammers:\n # if spamuser in self.dao.all_User:\n # self.spamfeature[spam_index] = self.Y[self.dao.all_User[spamuser]]\n # spam_index += 1\n # self.randomNormal = np.zeros((500,2))\n # self.randomSpam = np.zeros((500,2))\n # # for i in range(500):\n # # self.randomNormal[i] = self.normalfeature[random.randint(0,len(self.normalfeature)-1)]\n # # self.randomSpam[i] = self.spamfeature[random.randint(0,len(self.spamfeature)-1)]\n # plt.scatter(self.normalfeature[:, 0], self.normalfeature[:, 1], c='red',s=8,marker='o',label='NormalUser')\n # plt.scatter(self.spamfeature[:, 0], self.spamfeature[:, 1], c='blue',s=8,marker='o',label='Spammer')\n # plt.legend(loc='lower left')\n # plt.xticks([])\n # plt.yticks([])\n # plt.savefig('9.png',dpi=500)\n\n\n def predictRating(self,user,item):\n u = self.dao.all_User[user]\n i = self.dao.all_Item[item]\n return self.P[u].dot(self.Q[i])\n\n def predict(self):\n classifier = RandomForestClassifier(n_estimators=12)\n # classifier = DecisionTreeClassifier(criterion='entropy')\n classifier.fit(self.training, self.trainingLabels)\n pred_labels = classifier.predict(self.test)\n print('Decision Tree:')\n return pred_labels", "_____no_output_____" ] ], [ [ "### CoDetector", "_____no_output_____" ] ], [ [ "#CoDetector: Collaborative Shilling Detection Bridging Factorization and User Embedding\nclass CoDetector(SDetection):\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):\n super(CoDetector, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n def readConfiguration(self):\n super(CoDetector, self).readConfiguration()\n extraSettings = LineConfig(self.config['CoDetector'])\n self.k = int(extraSettings['-k'])\n self.negCount = int(extraSettings['-negCount']) # the number of negative samples\n if self.negCount < 1:\n self.negCount = 1\n\n self.regR = float(extraSettings['-gamma'])\n self.filter = int(extraSettings['-filter'])\n\n learningRate = LineConfig(self.config['learnRate'])\n self.lRate = float(learningRate['-init'])\n self.maxLRate = float(learningRate['-max'])\n self.maxIter = int(self.config['num.max.iter'])\n regular = LineConfig(self.config['reg.lambda'])\n self.regU, self.regI = float(regular['-u']), float(regular['-i'])\n\n def printAlgorConfig(self):\n super(CoDetector, self).printAlgorConfig()\n print('k: %d' % self.negCount)\n print('regR: %.5f' % self.regR)\n print('filter: %d' % self.filter)\n print('=' * 80)\n\n def initModel(self):\n super(CoDetector, self).initModel()\n self.w = np.random.rand(len(self.dao.all_User)+1) / 20 # bias value of user\n self.c = 
np.random.rand(len(self.dao.all_User)+1)/ 20 # bias value of context\n self.G = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # context embedding\n self.P = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # latent user matrix\n self.Q = np.random.rand(len(self.dao.all_Item)+1, self.k) / 20 # latent item matrix\n\n\n # constructing SPPMI matrix\n self.SPPMI = defaultdict(dict)\n D = len(self.dao.user)\n print('Constructing SPPMI matrix...')\n # for larger data set has many items, the process will be time consuming\n occurrence = defaultdict(dict)\n for user1 in self.dao.all_User:\n iList1, rList1 = self.dao.allUserRated(user1)\n if len(iList1) < self.filter:\n continue\n for user2 in self.dao.all_User:\n if user1 == user2:\n continue\n if user2 not in occurrence[user1]:\n iList2, rList2 = self.dao.allUserRated(user2)\n if len(iList2) < self.filter:\n continue\n count = len(set(iList1).intersection(set(iList2)))\n if count > self.filter:\n occurrence[user1][user2] = count\n occurrence[user2][user1] = count\n\n maxVal = 0\n frequency = {}\n for user1 in occurrence:\n frequency[user1] = sum(occurrence[user1].values()) * 1.0\n D = sum(frequency.values()) * 1.0\n # maxx = -1\n for user1 in occurrence:\n for user2 in occurrence[user1]:\n try:\n val = max([log(occurrence[user1][user2] * D / (frequency[user1] * frequency[user2]), 2) - log(\n self.negCount, 2), 0])\n except ValueError:\n print(self.SPPMI[user1][user2])\n print(self.SPPMI[user1][user2] * D / (frequency[user1] * frequency[user2]))\n if val > 0:\n if maxVal < val:\n maxVal = val\n self.SPPMI[user1][user2] = val\n self.SPPMI[user2][user1] = self.SPPMI[user1][user2]\n\n # normalize\n for user1 in self.SPPMI:\n for user2 in self.SPPMI[user1]:\n self.SPPMI[user1][user2] = self.SPPMI[user1][user2] / maxVal\n\n def buildModel(self):\n # Jointly decompose R(ratings) and SPPMI with shared user latent factors P\n iteration = 0\n while iteration < self.maxIter:\n self.loss = 0\n\n self.dao.ratings = dict(self.dao.trainingSet_u, **self.dao.testSet_u)\n for user in self.dao.ratings:\n for item in self.dao.ratings[user]:\n rating = self.dao.ratings[user][item]\n error = rating - self.predictRating(user,item)\n u = self.dao.all_User[user]\n i = self.dao.all_Item[item]\n p = self.P[u]\n q = self.Q[i]\n self.loss += error ** 2\n # update latent vectors\n self.P[u] += self.lRate * (error * q - self.regU * p)\n self.Q[i] += self.lRate * (error * p - self.regI * q)\n\n\n for user in self.SPPMI:\n u = self.dao.all_User[user]\n p = self.P[u]\n for context in self.SPPMI[user]:\n v = self.dao.all_User[context]\n m = self.SPPMI[user][context]\n g = self.G[v]\n diff = (m - p.dot(g) - self.w[u] - self.c[v])\n self.loss += diff ** 2\n # update latent vectors\n self.P[u] += self.lRate * diff * g\n self.G[v] += self.lRate * diff * p\n self.w[u] += self.lRate * diff\n self.c[v] += self.lRate * diff\n self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum() + self.regR * (self.G * self.G).sum()\n iteration += 1\n print('iteration:',iteration)\n\n # preparing examples\n self.training = []\n self.trainingLabels = []\n self.test = []\n self.testLabels = []\n\n for user in self.dao.trainingSet_u:\n self.training.append(self.P[self.dao.all_User[user]])\n self.trainingLabels.append(self.labels[user])\n for user in self.dao.testSet_u:\n self.test.append(self.P[self.dao.all_User[user]])\n self.testLabels.append(self.labels[user])\n\n def predictRating(self,user,item):\n u = self.dao.all_User[user]\n i = self.dao.all_Item[item]\n 
return self.P[u].dot(self.Q[i])\n\n def predict(self):\n classifier = DecisionTreeClassifier(criterion='entropy')\n classifier.fit(self.training, self.trainingLabels)\n pred_labels = classifier.predict(self.test)\n print('Decision Tree:')\n return pred_labels", "_____no_output_____" ] ], [ [ "### DegreeSAD", "_____no_output_____" ] ], [ [ "class DegreeSAD(SDetection):\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):\n super(DegreeSAD, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n def buildModel(self):\n self.MUD = {}\n self.RUD = {}\n self.QUD = {}\n # computing MUD,RUD,QUD for training set\n sList = sorted(iter(self.dao.trainingSet_i.items()), key=lambda d: len(d[1]), reverse=True)\n maxLength = len(sList[0][1])\n for user in self.dao.trainingSet_u:\n self.MUD[user] = 0\n for item in self.dao.trainingSet_u[user]:\n self.MUD[user] += len(self.dao.trainingSet_i[item]) #/ float(maxLength)\n self.MUD[user]/float(len(self.dao.trainingSet_u[user]))\n lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.trainingSet_u[user]]\n lengthList.sort(reverse=True)\n self.RUD[user] = lengthList[0] - lengthList[-1]\n\n lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.trainingSet_u[user]]\n lengthList.sort()\n self.QUD[user] = lengthList[int((len(lengthList) - 1) / 4.0)]\n\n # computing MUD,RUD,QUD for test set\n for user in self.dao.testSet_u:\n self.MUD[user] = 0\n for item in self.dao.testSet_u[user]:\n self.MUD[user] += len(self.dao.trainingSet_i[item]) #/ float(maxLength)\n for user in self.dao.testSet_u:\n lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.testSet_u[user]]\n lengthList.sort(reverse=True)\n self.RUD[user] = lengthList[0] - lengthList[-1]\n for user in self.dao.testSet_u:\n lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.testSet_u[user]]\n lengthList.sort()\n self.QUD[user] = lengthList[int((len(lengthList) - 1) / 4.0)]\n\n # preparing examples\n\n for user in self.dao.trainingSet_u:\n self.training.append([self.MUD[user], self.RUD[user], self.QUD[user]])\n self.trainingLabels.append(self.labels[user])\n\n for user in self.dao.testSet_u:\n self.test.append([self.MUD[user], self.RUD[user], self.QUD[user]])\n self.testLabels.append(self.labels[user])\n\n def predict(self):\n # classifier = LogisticRegression()\n # classifier.fit(self.training, self.trainingLabels)\n # pred_labels = classifier.predict(self.test)\n # print 'Logistic:'\n # print classification_report(self.testLabels, pred_labels)\n #\n # classifier = SVC()\n # classifier.fit(self.training, self.trainingLabels)\n # pred_labels = classifier.predict(self.test)\n # print 'SVM:'\n # print classification_report(self.testLabels, pred_labels)\n\n classifier = DecisionTreeClassifier(criterion='entropy')\n classifier.fit(self.training, self.trainingLabels)\n pred_labels = classifier.predict(self.test)\n print('Decision Tree:')\n return pred_labels", "_____no_output_____" ] ], [ [ "### FAP", "_____no_output_____" ] ], [ [ "class FAP(SDetection):\n\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):\n super(FAP, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n def readConfiguration(self):\n super(FAP, self).readConfiguration()\n # # s means the number of seedUser who be regarded as spammer in training\n self.s =int( self.config['seedUser'])\n # preserve the real spammer ID\n self.spammer = []\n for i in self.dao.user:\n if self.labels[i] == '1':\n 
self.spammer.append(self.dao.user[i])\n sThreshold = int(0.5 * len(self.spammer))\n if self.s > sThreshold :\n self.s = sThreshold\n print('*** seedUser is more than a half of spammer, so it is set to', sThreshold, '***')\n\n # # predict top-k user as spammer\n self.k = int(self.config['topKSpam'])\n # 0.5 is the ratio of spammer to dataset, it can be changed according to different datasets\n kThreshold = int(0.5 * (len(self.dao.user) - self.s))\n if self.k > kThreshold:\n self.k = kThreshold\n print('*** the number of top-K users is more than threshold value, so it is set to', kThreshold, '***')\n # product transition probability matrix self.TPUI and self.TPIU\n\n def __computeTProbability(self):\n # m--user count; n--item count\n m, n, tmp = self.dao.trainingSize()\n self.TPUI = np.zeros((m, n))\n self.TPIU = np.zeros((n, m))\n\n self.userUserIdDic = {}\n self.itemItemIdDic = {}\n tmpUser = list(self.dao.user.values())\n tmpUserId = list(self.dao.user.keys())\n tmpItem = list(self.dao.item.values())\n tmpItemId = list(self.dao.item.keys())\n for users in range(0, m):\n self.userUserIdDic[tmpUser[users]] = tmpUserId[users]\n for items in range(0, n):\n self.itemItemIdDic[tmpItem[items]] = tmpItemId[items]\n for i in range(0, m):\n for j in range(0, n):\n user = self.userUserIdDic[i]\n item = self.itemItemIdDic[j]\n # if has edge in graph,set a value ;otherwise set 0\n if (user not in self.bipartiteGraphUI) or (item not in self.bipartiteGraphUI[user]):\n continue\n else:\n w = float(self.bipartiteGraphUI[user][item])\n # to avoid positive feedback and reliability problem,we should Polish the w\n otherItemW = 0\n otherUserW = 0\n for otherItem in self.bipartiteGraphUI[user]:\n otherItemW += float(self.bipartiteGraphUI[user][otherItem])\n for otherUser in self.dao.trainingSet_i[item]:\n otherUserW += float(self.bipartiteGraphUI[otherUser][item])\n # wPrime = w*1.0/(otherUserW * otherItemW)\n wPrime = w\n self.TPUI[i][j] = wPrime / otherItemW\n self.TPIU[j][i] = wPrime / otherUserW\n if i % 100 == 0:\n print('progress: %d/%d' %(i,m))\n\n def initModel(self):\n # construction of the bipartite graph\n print(\"constructing bipartite graph...\")\n self.bipartiteGraphUI = {}\n for user in self.dao.trainingSet_u:\n tmpUserItemDic = {} # user-item-point\n for item in self.dao.trainingSet_u[user]:\n # tmpItemUserDic = {}#item-user-point\n recordValue = float(self.dao.trainingSet_u[user][item])\n w = 1 + abs((recordValue - self.dao.userMeans[user]) / self.dao.userMeans[user]) + abs(\n (recordValue - self.dao.itemMeans[item]) / self.dao.itemMeans[item]) + abs(\n (recordValue - self.dao.globalMean) / self.dao.globalMean)\n # tmpItemUserDic[user] = w\n tmpUserItemDic[item] = w\n # self.bipartiteGraphIU[item] = tmpItemUserDic\n self.bipartiteGraphUI[user] = tmpUserItemDic\n # we do the polish in computing the transition probability\n print(\"computing transition probability...\")\n self.__computeTProbability()\n\n def isConvergence(self, PUser, PUserOld):\n if len(PUserOld) == 0:\n return True\n for i in range(0, len(PUser)):\n if (PUser[i] - PUserOld[i]) > 0.01:\n return True\n return False\n\n def buildModel(self):\n # -------init--------\n m, n, tmp = self.dao.trainingSize()\n PUser = np.zeros(m)\n PItem = np.zeros(n)\n self.testLabels = [0 for i in range(m)]\n self.predLabels = [0 for i in range(m)]\n\n # preserve seedUser Index\n self.seedUser = []\n randDict = {}\n for i in range(0, self.s):\n randNum = random.randint(0, len(self.spammer) - 1)\n while randNum in randDict:\n randNum = 
random.randint(0, len(self.spammer) - 1)\n randDict[randNum] = 0\n self.seedUser.append(int(self.spammer[randNum]))\n # print len(randDict), randDict\n\n #initial user and item spam probability\n for j in range(0, m):\n if j in self.seedUser:\n #print type(j),j\n PUser[j] = 1\n else:\n PUser[j] = random.random()\n for tmp in range(0, n):\n PItem[tmp] = random.random()\n\n # -------iterator-------\n PUserOld = []\n iterator = 0\n while self.isConvergence(PUser, PUserOld):\n #while iterator < 100:\n for j in self.seedUser:\n PUser[j] = 1\n PUserOld = PUser\n PItem = np.dot(self.TPIU, PUser)\n PUser = np.dot(self.TPUI, PItem)\n iterator += 1\n print(self.foldInfo,'iteration', iterator)\n\n PUserDict = {}\n userId = 0\n for i in PUser:\n PUserDict[userId] = i\n userId += 1\n for j in self.seedUser:\n del PUserDict[j]\n\n self.PSort = sorted(iter(PUserDict.items()), key=lambda d: d[1], reverse=True)\n\n\n def predict(self):\n # predLabels\n # top-k user as spammer\n spamList = []\n sIndex = 0\n while sIndex < self.k:\n spam = self.PSort[sIndex][0]\n spamList.append(spam)\n self.predLabels[spam] = 1\n sIndex += 1\n\n # trueLabels\n for user in self.dao.trainingSet_u:\n userInd = self.dao.user[user]\n # print type(user), user, userInd\n self.testLabels[userInd] = int(self.labels[user])\n\n # delete seedUser labels\n differ = 0\n for user in self.seedUser:\n user = int(user - differ)\n # print type(user)\n del self.predLabels[user]\n del self.testLabels[user]\n differ += 1\n\n return self.predLabels", "_____no_output_____" ] ], [ [ "### PCASelectUsers", "_____no_output_____" ] ], [ [ "class PCASelectUsers(SDetection):\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]', k=None, n=None ):\n super(PCASelectUsers, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n\n def readConfiguration(self):\n super(PCASelectUsers, self).readConfiguration()\n # K = top-K vals of cov\n self.k = int(self.config['kVals'])\n self.userNum = len(self.dao.trainingSet_u)\n self.itemNum = len(self.dao.trainingSet_i)\n if self.k >= min(self.userNum, self.itemNum):\n self.k = 3\n print('*** k-vals is more than the number of user or item, so it is set to', self.k)\n\n # n = attack size or the ratio of spammers to normal users\n self.n = float(self.config['attackSize'])\n\n\n def buildModel(self):\n #array initialization\n dataArray = np.zeros([self.userNum, self.itemNum], dtype=float)\n self.testLabels = np.zeros(self.userNum)\n self.predLabels = np.zeros(self.userNum)\n\n #add data\n print('construct matrix')\n for user in self.dao.trainingSet_u:\n for item in list(self.dao.trainingSet_u[user].keys()):\n value = self.dao.trainingSet_u[user][item]\n a = self.dao.user[user]\n b = self.dao.item[item]\n dataArray[a][b] = value\n\n sMatrix = csr_matrix(dataArray)\n # z-scores\n sMatrix = preprocessing.scale(sMatrix, axis=0, with_mean=False)\n sMT = np.transpose(sMatrix)\n # cov\n covSM = np.dot(sMT, sMatrix)\n # eigen-value-decomposition\n vals, vecs = scipy.sparse.linalg.eigs(covSM, k=self.k, which='LM')\n\n newArray = np.dot(dataArray**2, np.real(vecs))\n\n distanceDict = {}\n userId = 0\n for user in newArray:\n distance = 0\n for tmp in user:\n distance += tmp\n distanceDict[userId] = float(distance)\n userId += 1\n\n print('sort distance ')\n self.disSort = sorted(iter(distanceDict.items()), key=lambda d: d[1], reverse=False)\n\n\n def predict(self):\n print('predict spammer')\n spamList = []\n i = 0\n while i < self.n * len(self.disSort):\n spam = self.disSort[i][0]\n 
spamList.append(spam)\n self.predLabels[spam] = 1\n i += 1\n\n # trueLabels\n for user in self.dao.trainingSet_u:\n userInd = self.dao.user[user]\n self.testLabels[userInd] = int(self.labels[user])\n\n return self.predLabels", "_____no_output_____" ] ], [ [ "### SemiSAD", "_____no_output_____" ] ], [ [ "class SemiSAD(SDetection):\n def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):\n super(SemiSAD, self).__init__(conf, trainingSet, testSet, labels, fold)\n\n def readConfiguration(self):\n super(SemiSAD, self).readConfiguration()\n # K = top-K vals of cov\n self.k = int(self.config['topK'])\n # Lambda = λ参数\n self.Lambda = float(self.config['Lambda'])\n\n def buildModel(self):\n self.H = {}\n self.DegSim = {}\n self.LengVar = {}\n self.RDMA = {}\n self.FMTD = {}\n print('Begin feature engineering...')\n # computing H,DegSim,LengVar,RDMA,FMTD for LabledData set\n trainingIndex = 0\n testIndex = 0\n trainingUserCount, trainingItemCount, trainingrecordCount = self.dao.trainingSize()\n testUserCount, testItemCount, testrecordCount = self.dao.testSize()\n for user in self.dao.trainingSet_u:\n trainingIndex += 1\n self.H[user] = 0\n for i in range(10,50,5):\n n = 0\n for item in self.dao.trainingSet_u[user]:\n if(self.dao.trainingSet_u[user][item]==(i/10.0)):\n n+=1\n if n==0:\n self.H[user] += 0\n else:\n self.H[user] += (-(n/(trainingUserCount*1.0))*math.log(n/(trainingUserCount*1.0),2))\n\n SimList = []\n self.DegSim[user] = 0\n for user1 in self.dao.trainingSet_u:\n userA, userB, C, D, E, Count = 0,0,0,0,0,0\n for item in list(set(self.dao.trainingSet_u[user]).intersection(set(self.dao.trainingSet_u[user1]))):\n userA += self.dao.trainingSet_u[user][item]\n userB += self.dao.trainingSet_u[user1][item]\n Count += 1\n if Count==0:\n AverageA = 0\n AverageB = 0\n else:\n AverageA = userA/Count\n AverageB = userB/Count\n for item in list(set(self.dao.trainingSet_u[user]).intersection(set(self.dao.trainingSet_u[user1]))):\n C += (self.dao.trainingSet_u[user][item]-AverageA)*(self.dao.trainingSet_u[user1][item]-AverageB)\n D += np.square(self.dao.trainingSet_u[user][item]-AverageA)\n E += np.square(self.dao.trainingSet_u[user1][item]-AverageB)\n if C==0:\n SimList.append(0.0)\n else:\n SimList.append(C/(math.sqrt(D)*math.sqrt(E)))\n SimList.sort(reverse=True)\n for i in range(1,self.k+1):\n self.DegSim[user] += SimList[i] / (self.k)\n\n GlobalAverage = 0\n F = 0\n for user2 in self.dao.trainingSet_u:\n GlobalAverage += len(self.dao.trainingSet_u[user2]) / (len(self.dao.trainingSet_u) + 0.0)\n for user3 in self.dao.trainingSet_u:\n F += pow(len(self.dao.trainingSet_u[user3])-GlobalAverage,2)\n self.LengVar[user] = abs(len(self.dao.trainingSet_u[user])-GlobalAverage)/(F*1.0)\n\n Divisor = 0\n for item1 in self.dao.trainingSet_u[user]:\n Divisor += abs(self.dao.trainingSet_u[user][item1]-self.dao.itemMeans[item1])/len(self.dao.trainingSet_i[item1])\n self.RDMA[user] = Divisor/len(self.dao.trainingSet_u[user])\n\n Minuend, index1, Subtrahend, index2 = 0, 0, 0, 0\n for item3 in self.dao.trainingSet_u[user]:\n if(self.dao.trainingSet_u[user][item3]==5.0 or self.dao.trainingSet_u[user][item3]==1.0) :\n Minuend += sum(self.dao.trainingSet_i[item3].values())\n index1 += len(self.dao.trainingSet_i[item3])\n else:\n Subtrahend += sum(self.dao.trainingSet_i[item3].values())\n index2 += len(self.dao.trainingSet_i[item3])\n if index1 == 0 and index2 == 0:\n self.FMTD[user] = 0\n elif index1 == 0:\n self.FMTD[user] = abs(Subtrahend / index2)\n elif index2 == 0:\n self.FMTD[user] = 
abs(Minuend / index1)\n else:\n self.FMTD[user] = abs(Minuend / index1 - Subtrahend / index2)\n\n if trainingIndex==(trainingUserCount/5):\n print('trainingData Done 20%...')\n elif trainingIndex==(trainingUserCount/5*2):\n print('trainingData Done 40%...')\n elif trainingIndex==(trainingUserCount/5*3):\n print('trainingData Done 60%...')\n elif trainingIndex==(trainingUserCount/5*4):\n print('trainingData Done 80%...')\n elif trainingIndex==(trainingUserCount):\n print('trainingData Done 100%...')\n\n # computing H,DegSim,LengVar,RDMA,FMTD for UnLabledData set\n for user in self.dao.testSet_u:\n testIndex += 1\n self.H[user] = 0\n for i in range(10,50,5):\n n = 0\n for item in self.dao.testSet_u[user]:\n if(self.dao.testSet_u[user][item]==(i/10.0)):\n n+=1\n if n==0:\n self.H[user] += 0\n else:\n self.H[user] += (-(n/(testUserCount*1.0))*math.log(n/(testUserCount*1.0),2))\n\n SimList = []\n self.DegSim[user] = 0\n for user1 in self.dao.testSet_u:\n userA, userB, C, D, E, Count = 0,0,0,0,0,0\n for item in list(set(self.dao.testSet_u[user]).intersection(set(self.dao.testSet_u[user1]))):\n userA += self.dao.testSet_u[user][item]\n userB += self.dao.testSet_u[user1][item]\n Count += 1\n if Count==0:\n AverageA = 0\n AverageB = 0\n else:\n AverageA = userA/Count\n AverageB = userB/Count\n for item in list(set(self.dao.testSet_u[user]).intersection(set(self.dao.testSet_u[user1]))):\n C += (self.dao.testSet_u[user][item]-AverageA)*(self.dao.testSet_u[user1][item]-AverageB)\n D += np.square(self.dao.testSet_u[user][item]-AverageA)\n E += np.square(self.dao.testSet_u[user1][item]-AverageB)\n if C==0:\n SimList.append(0.0)\n else:\n SimList.append(C/(math.sqrt(D)*math.sqrt(E)))\n SimList.sort(reverse=True)\n for i in range(1,self.k+1):\n self.DegSim[user] += SimList[i] / self.k\n\n GlobalAverage = 0\n F = 0\n for user2 in self.dao.testSet_u:\n GlobalAverage += len(self.dao.testSet_u[user2]) / (len(self.dao.testSet_u) + 0.0)\n for user3 in self.dao.testSet_u:\n F += pow(len(self.dao.testSet_u[user3])-GlobalAverage,2)\n self.LengVar[user] = abs(len(self.dao.testSet_u[user])-GlobalAverage)/(F*1.0)\n\n Divisor = 0\n for item1 in self.dao.testSet_u[user]:\n Divisor += abs(self.dao.testSet_u[user][item1]-self.dao.itemMeans[item1])/len(self.dao.testSet_i[item1])\n self.RDMA[user] = Divisor/len(self.dao.testSet_u[user])\n\n Minuend, index1, Subtrahend, index2= 0,0,0,0\n for item3 in self.dao.testSet_u[user]:\n if(self.dao.testSet_u[user][item3]==5.0 or self.dao.testSet_u[user][item3]==1.0):\n Minuend += sum(self.dao.testSet_i[item3].values())\n index1 += len(self.dao.testSet_i[item3])\n else:\n Subtrahend += sum(self.dao.testSet_i[item3].values())\n index2 += len(self.dao.testSet_i[item3])\n if index1 == 0 and index2 == 0:\n self.FMTD[user] = 0\n elif index1 == 0:\n self.FMTD[user] = abs(Subtrahend / index2)\n elif index2 == 0:\n self.FMTD[user] = abs(Minuend / index1)\n else:\n self.FMTD[user] = abs(Minuend / index1 - Subtrahend / index2)\n\n if testIndex == testUserCount / 5:\n print('testData Done 20%...')\n elif testIndex == testUserCount / 5 * 2:\n print('testData Done 40%...')\n elif testIndex == testUserCount / 5 * 3:\n print('testData Done 60%...')\n elif testIndex == testUserCount / 5 * 4:\n print('testData Done 80%...')\n elif testIndex == testUserCount:\n print('testData Done 100%...')\n\n # preparing examples training for LabledData ,test for UnLableData\n\n for user in self.dao.trainingSet_u:\n self.training.append([self.H[user], self.DegSim[user], 
self.LengVar[user],self.RDMA[user],self.FMTD[user]])\n self.trainingLabels.append(self.labels[user])\n\n for user in self.dao.testSet_u:\n self.test.append([self.H[user], self.DegSim[user], self.LengVar[user],self.RDMA[user],self.FMTD[user]])\n self.testLabels.append(self.labels[user])\n\n def predict(self):\n ClassifierN = 0\n classifier = GaussianNB()\n X_train,X_test,y_train,y_test = train_test_split(self.training,self.trainingLabels,test_size=0.75,random_state=33)\n classifier.fit(X_train, y_train)\n # predict UnLabledData\n #pred_labelsForTrainingUn = classifier.predict(X_test)\n print('Enhanced classifier...')\n while 1:\n if len(X_test)<=5: # min\n break #min\n proba_labelsForTrainingUn = classifier.predict_proba(X_test)\n X_test_labels = np.hstack((X_test, proba_labelsForTrainingUn))\n X_test_labels0_sort = sorted(X_test_labels,key=lambda x:x[5],reverse=True)\n if X_test_labels0_sort[4][5]>X_test_labels0_sort[4][6]:\n a = [x[:5] for x in X_test_labels0_sort]\n b = a[0:5]\n classifier.partial_fit(b, ['0','0','0','0','0'], classes=['0', '1'],sample_weight=np.ones(len(b), dtype=np.float) * self.Lambda)\n X_test_labels = X_test_labels0_sort[5:]\n X_test = a[5:]\n if len(X_test)<6: # min\n break #min\n\n X_test_labels0_sort = sorted(X_test_labels, key=lambda x: x[5], reverse=True)\n if X_test_labels0_sort[4][5]<=X_test_labels0_sort[4][6]: #min\n a = [x[:5] for x in X_test_labels0_sort]\n b = a[0:5]\n classifier.partial_fit(b, ['1', '1', '1', '1', '1'], classes=['0', '1'],sample_weight=np.ones(len(b), dtype=np.float) * 1)\n X_test_labels = X_test_labels0_sort[5:] # min\n X_test = a[5:]\n if len(X_test)<6:\n break\n # while 1 :\n # p1 = pred_labelsForTrainingUn\n # # 将带λ参数的无标签数据拟合入分类器\n # classifier.partial_fit(X_test, pred_labelsForTrainingUn,classes=['0','1'], sample_weight=np.ones(len(X_test),dtype=np.float)*self.Lambda)\n # pred_labelsForTrainingUn = classifier.predict(X_test)\n # p2 = pred_labelsForTrainingUn\n # # 判断分类器是否稳定\n # if list(p1)==list(p2) :\n # ClassifierN += 1\n # elif ClassifierN > 0:\n # ClassifierN = 0\n # if ClassifierN == 20:\n # break\n pred_labels = classifier.predict(self.test)\n print('naive_bayes with EM algorithm:')\n return pred_labels", "_____no_output_____" ] ], [ [ "## Main", "_____no_output_____" ] ], [ [ "class SDLib(object):\n def __init__(self,config):\n self.trainingData = [] # training data\n self.testData = [] # testData\n self.relation = []\n self.measure = []\n self.config =config\n self.ratingConfig = LineConfig(config['ratings.setup'])\n self.labels = FileIO.loadLabels(config['label'])\n\n if self.config.contains('evaluation.setup'):\n self.evaluation = LineConfig(config['evaluation.setup'])\n \n if self.evaluation.contains('-testSet'):\n #specify testSet\n self.trainingData = FileIO.loadDataSet(config, config['ratings'])\n self.testData = FileIO.loadDataSet(config, self.evaluation['-testSet'], bTest=True)\n\n elif self.evaluation.contains('-ap'):\n #auto partition\n self.trainingData = FileIO.loadDataSet(config,config['ratings'])\n self.trainingData,self.testData = DataSplit.\\\n dataSplit(self.trainingData,test_ratio=float(self.evaluation['-ap']))\n\n elif self.evaluation.contains('-cv'):\n #cross validation\n self.trainingData = FileIO.loadDataSet(config, config['ratings'])\n #self.trainingData,self.testData = DataSplit.crossValidation(self.trainingData,int(self.evaluation['-cv']))\n\n else:\n print('Evaluation is not well configured!')\n exit(-1)\n\n if config.contains('social'):\n self.socialConfig = 
LineConfig(self.config['social.setup'])\n self.relation = FileIO.loadRelationship(config,self.config['social'])\n print('preprocessing...')\n\n\n def execute(self):\n if self.evaluation.contains('-cv'):\n k = int(self.evaluation['-cv'])\n if k <= 1 or k > 10:\n k = 3\n #create the manager used to communication in multiprocess\n manager = Manager()\n m = manager.dict()\n i = 1\n tasks = []\n for train,test in DataSplit.crossValidation(self.trainingData,k):\n fold = '['+str(i)+']'\n if self.config.contains('social'):\n method = self.config['methodName'] + \"(self.config,train,test,self.labels,self.relation,fold)\"\n else:\n method = self.config['methodName'] + \"(self.config,train,test,self.labels,fold)\"\n #create the process\n p = Process(target=run,args=(m,eval(method),i))\n tasks.append(p)\n i+=1\n #start the processes\n for p in tasks:\n p.start()\n #wait until all processes are completed\n for p in tasks:\n p.join()\n #compute the mean error of k-fold cross validation\n self.measure = [dict(m)[i] for i in range(1,k+1)]\n res = []\n pattern = re.compile('(\\d+\\.\\d+)')\n countPattern = re.compile('\\d+\\\\n')\n labelPattern = re.compile('\\s\\d{1}[^\\.|\\n|\\d]')\n labels = re.findall(labelPattern, self.measure[0])\n values = np.array([0]*9,dtype=float)\n count = np.array([0,0,0],dtype=int)\n for report in self.measure:\n patterns = np.array(re.findall(pattern,report),dtype=float)\n values += patterns[:9]\n patterncounts = np.array(re.findall(countPattern,report),dtype=int)\n count += patterncounts[:3]\n values/=k\n values=np.around(values,decimals=4)\n res.append(' precision recall f1-score support\\n\\n')\n res.append(' '+labels[0]+' '+' '.join(np.array(values[0:3],dtype=str).tolist())+' '+str(count[0])+'\\n')\n res.append(' '+labels[1]+' '+' '.join(np.array(values[3:6],dtype=str).tolist())+' '+str(count[1])+'\\n\\n')\n res.append(' avg/total ' + ' '.join(np.array(values[6:9], dtype=str).tolist()) + ' ' + str(count[2]) + '\\n')\n print('Total:')\n print(''.join(res))\n # for line in lines[1:]:\n #\n # measure = self.measure[0][i].split(':')[0]\n # total = 0\n # for j in range(k):\n # total += float(self.measure[j][i].split(':')[1])\n # res.append(measure+':'+str(total/k)+'\\n')\n #output result\n currentTime = strftime(\"%Y-%m-%d %H-%M-%S\", localtime(time()))\n outDir = LineConfig(self.config['output.setup'])['-dir']\n fileName = self.config['methodName'] +'@'+currentTime+'-'+str(k)+'-fold-cv' + '.txt'\n FileIO.writeFile(outDir,fileName,res)\n print('The results have been output to '+abspath(LineConfig(self.config['output.setup'])['-dir'])+'\\n')\n else:\n if self.config.contains('social'):\n method = self.config['methodName'] + '(self.config,self.trainingData,self.testData,self.labels,self.relation)'\n else:\n method = self.config['methodName'] + '(self.config,self.trainingData,self.testData,self.labels)'\n eval(method).execute()\n\n\ndef run(measure,algor,order):\n measure[order] = algor.execute()", "_____no_output_____" ], [ "conf = Config('DegreeSAD.conf')\nsd = SDLib(conf)\nsd.execute()", "loading training data...\npreprocessing...\nAlgorithm: DegreeSAD\nRatings dataSet: /content/dataset/amazon/profiles.txt\nTraining set size: (user count: 3921, item count 14711, record count: 40730)\nTest set size: (user count: 981, item count 6079, record count: 10368)\n================================================================================\nInitializing model [1]...\nBuilding Model [1]...\nInitializing model [2]...\nBuilding Model [2]...\nInitializing model [3]...\nBuilding Model 
[3]...\nInitializing model [4]...\nBuilding Model [4]...\nInitializing model [5]...\nBuilding Model [5]...\nPredicting [1]...\nDecision Tree:\nPredicting [2]...\n precision recall f1-score support\n\n 0 0.7709 0.8498 0.8084 586\n 1 0.7373 0.6253 0.6767 395\n\n accuracy 0.7594 981\n macro avg 0.7541 0.7376 0.7426 981\nweighted avg 0.7574 0.7594 0.7554 981\n\nDecision Tree:\n precision recall f1-score support\n\n 0 0.7852 0.8425 0.8128 603\n 1 0.7156 0.6323 0.6713 378\n\n accuracy 0.7615 981\n macro avg 0.7504 0.7374 0.7421 981\nweighted avg 0.7583 0.7615 0.7583 981\n\nPredicting [3]...\nDecision Tree:\n precision recall f1-score support\n\n 0 0.7658 0.8381 0.8003 593\n 1 0.7100 0.6072 0.6546 387\n\n accuracy 0.7469 980\n macro avg 0.7379 0.7227 0.7275 980\nweighted avg 0.7437 0.7469 0.7428 980\n\nPredicting [5]...\nPredicting [4]...\nDecision Tree:\n precision recall f1-score support\n\n 0 0.7876 0.8148 0.8010 610\n 1 0.6762 0.6378 0.6565 370\n\n accuracy 0.7480 980\n macro avg 0.7319 0.7263 0.7287 980\nweighted avg 0.7456 0.7480 0.7464 980\n\nDecision Tree:\n precision recall f1-score support\n\n 0 0.7929 0.8507 0.8208 603\n 1 0.7297 0.6446 0.6845 377\n\n accuracy 0.7714 980\n macro avg 0.7613 0.7477 0.7527 980\nweighted avg 0.7686 0.7714 0.7684 980\n\nTotal:\n precision recall f1-score support\n\n 0 0.7805 0.8392 0.8087 2995\n 1 0.7138 0.6294 0.6687 1907\n\n avg/total 0.7574 0.7471 0.7343 4902\n\nThe results have been output to /content/results\n\n" ], [ "print('='*80)\nprint('Supervised Methods:')\nprint('1. DegreeSAD 2.CoDetector 3.BayesDetector\\n')\nprint('Semi-Supervised Methods:')\nprint('4. SemiSAD\\n')\nprint('Unsupervised Methods:')\nprint('5. PCASelectUsers 6. FAP 7.timeIndex\\n')\nprint('-'*80)\norder = eval(input('please enter the num of the method to run it:'))\n\nalgor = -1\nconf = -1\n\ns = tm.clock()\n\nif order == 1:\n conf = Config('DegreeSAD.conf')\n\nelif order == 2:\n conf = Config('CoDetector.conf')\n\nelif order == 3:\n conf = Config('BayesDetector.conf')\n\nelif order == 4:\n conf = Config('SemiSAD.conf')\n\nelif order == 5:\n conf = Config('PCASelectUsers.conf')\n\nelif order == 6:\n conf = Config('FAP.conf')\nelif order == 7:\n conf = Config('timeIndex.conf')\n\nelse:\n print('Error num!')\n exit(-1)\n\n# conf = Config('DegreeSAD.conf')\n\nsd = SDLib(conf)\nsd.execute()\ne = tm.clock()\nprint(\"Run time: %f s\" % (e - s))", "================================================================================\nSupervised Methods:\n1. DegreeSAD 2.CoDetector 3.BayesDetector\n\nSemi-Supervised Methods:\n4. SemiSAD\n\nUnsupervised Methods:\n5. PCASelectUsers 6. FAP 7.timeIndex\n\n--------------------------------------------------------------------------------\nplease enter the num of the method to run it:2\n" ], [ "print('='*80)\nprint('Supervised Methods:')\nprint('1. DegreeSAD 2.CoDetector 3.BayesDetector\\n')\nprint('Semi-Supervised Methods:')\nprint('4. SemiSAD\\n')\nprint('Unsupervised Methods:')\nprint('5. PCASelectUsers 6. 
FAP 7.timeIndex\\n')\nprint('-'*80)\norder = eval(input('please enter the num of the method to run it:'))\n\nalgor = -1\nconf = -1\n\ns = tm.clock()\n\nif order == 1:\n conf = Config('DegreeSAD.conf')\n\nelif order == 2:\n conf = Config('CoDetector.conf')\n\nelif order == 3:\n conf = Config('BayesDetector.conf')\n\nelif order == 4:\n conf = Config('SemiSAD.conf')\n\nelif order == 5:\n conf = Config('PCASelectUsers.conf')\n\nelif order == 6:\n conf = Config('FAP.conf')\nelif order == 7:\n conf = Config('timeIndex.conf')\n\nelse:\n print('Error num!')\n exit(-1)\n\n# conf = Config('DegreeSAD.conf')\n\nsd = SDLib(conf)\nsd.execute()\ne = tm.clock()\nprint(\"Run time: %f s\" % (e - s))", "================================================================================\nSupervised Methods:\n1. DegreeSAD 2.CoDetector 3.BayesDetector\n\nSemi-Supervised Methods:\n4. SemiSAD\n\nUnsupervised Methods:\n5. PCASelectUsers 6. FAP 7.timeIndex\n\n--------------------------------------------------------------------------------\nplease enter the num of the method to run it:1\nloading training data...\npreprocessing...\n" ], [ "print('='*80)\nprint('Supervised Methods:')\nprint('1. DegreeSAD 2.CoDetector 3.BayesDetector\\n')\nprint('Semi-Supervised Methods:')\nprint('4. SemiSAD\\n')\nprint('Unsupervised Methods:')\nprint('5. PCASelectUsers 6. FAP 7.timeIndex\\n')\nprint('-'*80)\norder = eval(input('please enter the num of the method to run it:'))\n\nalgor = -1\nconf = -1\n\ns = tm.clock()\n\nif order == 1:\n conf = Config('DegreeSAD.conf')\n\nelif order == 2:\n conf = Config('CoDetector.conf')\n\nelif order == 3:\n conf = Config('BayesDetector.conf')\n\nelif order == 4:\n conf = Config('SemiSAD.conf')\n\nelif order == 5:\n conf = Config('PCASelectUsers.conf')\n\nelif order == 6:\n conf = Config('FAP.conf')\nelif order == 7:\n conf = Config('timeIndex.conf')\n\nelse:\n print('Error num!')\n exit(-1)\n\n# conf = Config('DegreeSAD.conf')\n\nsd = SDLib(conf)\nsd.execute()\ne = tm.clock()\nprint(\"Run time: %f s\" % (e - s))", "================================================================================\nSupervised Methods:\n1. DegreeSAD 2.CoDetector 3.BayesDetector\n\nSemi-Supervised Methods:\n4. SemiSAD\n\nUnsupervised Methods:\n5. PCASelectUsers 6. FAP 7.timeIndex\n\n--------------------------------------------------------------------------------\nplease enter the num of the method to run it:6\n" ], [ "print('='*80)\nprint('Supervised Methods:')\nprint('1. DegreeSAD 2.CoDetector 3.BayesDetector\\n')\nprint('Semi-Supervised Methods:')\nprint('4. SemiSAD\\n')\nprint('Unsupervised Methods:')\nprint('5. PCASelectUsers 6. FAP 7.timeIndex\\n')\nprint('-'*80)\norder = eval(input('please enter the num of the method to run it:'))\n\nalgor = -1\nconf = -1\n\ns = tm.clock()\n\nif order == 1:\n conf = Config('DegreeSAD.conf')\n\nelif order == 2:\n conf = Config('CoDetector.conf')\n\nelif order == 3:\n conf = Config('BayesDetector.conf')\n\nelif order == 4:\n conf = Config('SemiSAD.conf')\n\nelif order == 5:\n conf = Config('PCASelectUsers.conf')\n\nelif order == 6:\n conf = Config('FAP.conf')\nelif order == 7:\n conf = Config('timeIndex.conf')\n\nelse:\n print('Error num!')\n exit(-1)\n\n# conf = Config('DegreeSAD.conf')\n\nsd = SDLib(conf)\nsd.execute()\ne = tm.clock()\nprint(\"Run time: %f s\" % (e - s))", "================================================================================\nSupervised Methods:\n1. DegreeSAD 2.CoDetector 3.BayesDetector\n\nSemi-Supervised Methods:\n4. 
SemiSAD\n\nUnsupervised Methods:\n5. PCASelectUsers 6. FAP 7.timeIndex\n\n--------------------------------------------------------------------------------\nplease enter the num of the method to run it:4\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d058bc2086f07d55181cdc58704c07be5907e43c
12,742
ipynb
Jupyter Notebook
financial_models/Gold_Random_Forest_Regressor_6.7+.ipynb
labs13-quake-viewer/ds-notebooks
3b649ff6d51ede244652b008cb49c218e9a96c54
[ "MIT" ]
null
null
null
financial_models/Gold_Random_Forest_Regressor_6.7+.ipynb
labs13-quake-viewer/ds-notebooks
3b649ff6d51ede244652b008cb49c218e9a96c54
[ "MIT" ]
1
2019-06-10T15:18:55.000Z
2019-06-10T15:42:05.000Z
financial_models/Gold_Random_Forest_Regressor_6.7+.ipynb
labs13-quake-viewer/ds-notebooks
3b649ff6d51ede244652b008cb49c218e9a96c54
[ "MIT" ]
1
2022-01-28T19:34:19.000Z
2022-01-28T19:34:19.000Z
12,742
12,742
0.624627
[ [ [ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "# Load S&P 500 df\ndf_quake_gold = pd.read_csv(\"https://raw.githubusercontent.com/labs13-quake-viewer/ds-data/master/\" +\n \"Gold%20Price%20Change%20by%20Earthquake.csv\", index_col=0)\ndf_quake_gold.shape", "_____no_output_____" ], [ "df_quake_gold.head()", "_____no_output_____" ], [ "dates = []\nfor i in df_quake_gold.Date:\n dates.append(int(''.join(c for c in i if c.isdigit())))", "_____no_output_____" ], [ "df_quake_gold[\"magg\"] = (df_quake_gold[\"Mag\"] * 10).astype(int)", "_____no_output_____" ], [ "df_quake_gold[\"dates\"] = dates", "_____no_output_____" ], [ "df_quake_gold.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1445 entries, 0 to 1444\nData columns (total 11 columns):\nDate 1445 non-null object\nMag 1445 non-null float64\nPrice_Day_0 1445 non-null float64\nPrice_Day_7 1445 non-null float64\nPrice_Day_14 1445 non-null float64\nPrice_Day_30 1445 non-null float64\nAppr_Day_7 1445 non-null float64\nAppr_Day_14 1445 non-null float64\nAppr_Day_30 1445 non-null float64\nmagg 1445 non-null int64\ndates 1445 non-null int64\ndtypes: float64(8), int64(2), object(1)\nmemory usage: 135.5+ KB\n" ], [ "y = df_quake_gold['Appr_Day_30']\nX = df_quake_gold[['dates', 'Mag']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=42)\nprint(\"Original shape:\", X.shape, \"\\n\")\n\nprint(\"X_train shape:\", X_train.shape)\nprint(\"X_test shape:\", X_test.shape)\nprint(\"y_train shape:\", y_train.shape)\nprint(\"y_test shape:\", y_test.shape)", "Original shape: (1445, 2) \n\nX_train shape: (1083, 2)\nX_test shape: (362, 2)\ny_train shape: (1083,)\ny_test shape: (362,)\n" ], [ "X_train.sample()", "_____no_output_____" ], [ "# Instantiate model with 100 decision trees\nrf = RandomForestRegressor(n_estimators = 100, random_state = 42)", "_____no_output_____" ], [ "# Train model on training data\nrf.fit(X_train, y_train)", "_____no_output_____" ], [ "# Use forest's predict method on test data\npredictions = rf.predict(X_test)", "_____no_output_____" ], [ "# Calculate absolute errors\nerrors = abs(predictions - y_test)", "_____no_output_____" ], [ "# Print out mean absolute error\nprint('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')", "Mean Absolute Error: 3.11 degrees.\n" ], [ "# Calculate and display accuracy\naccuracy = errors.sum() / y_test.sum()\nprint(\"For Gold, Incident Mag >= 6.7 ({} incidents)\".format(df_quake_gold.shape[0]))\nprint(\"Random Forest Regressor Model score:\", rf.score(X_train, y_train))\nprint('Predictive Accuracy:', round(accuracy, 2), '%.')", "For Gold, Incident Mag >= 6.7 (1445 incidents)\nRandom Forest Regressor Model score: 0.911516941707296\nPredictive Accuracy: 6.14 %.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d058c4a2d80b5365651f58cf0f6e8ba48d3a5ba3
259,199
ipynb
Jupyter Notebook
src/Fig5.ipynb
AJueling/FW-code
185993e45a2bc95aab0043ec7053b5bbad051f08
[ "BSD-3-Clause" ]
null
null
null
src/Fig5.ipynb
AJueling/FW-code
185993e45a2bc95aab0043ec7053b5bbad051f08
[ "BSD-3-Clause" ]
null
null
null
src/Fig5.ipynb
AJueling/FW-code
185993e45a2bc95aab0043ec7053b5bbad051f08
[ "BSD-3-Clause" ]
null
null
null
1,112.44206
250,812
0.952843
[ [ [ "# Calculate the AMOC in density space\n\n$VVEL*DZT*DXT (x,y,z)$ -> $VVEL*DZT*DXT (x,y,$\\sigma$)$ -> $\\sum_{x=W}^E$ -> $\\sum_{\\sigma=\\sigma_{max/min}}^\\sigma$", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport xgcm\nimport numpy as np\nimport xarray as xr\nimport cmocean\nimport pop_tools\nimport matplotlib\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "%matplotlib inline\nmatplotlib.rc_file('rc_file_paper')\n%config InlineBackend.print_figure_kwargs={'bbox_inches':None}\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from MOC import calculate_AMOC_sigma_z\nfrom tqdm import notebook\nfrom paths import path_results, path_prace, file_RMASK_ocn, file_RMASK_ocn_low, file_ex_ocn_ctrl, file_ex_ocn_lpd, path_data\nfrom FW_plots import Atl_lats\nfrom timeseries import lowpass\nfrom xhistogram.xarray import histogram\nfrom xr_DataArrays import xr_DZ_xgcm\nfrom xr_regression import xr_lintrend, xr_linear_trend, xr_2D_trends, ocn_field_regression", "_____no_output_____" ], [ "RAPIDz = xr.open_dataarray(f'{path_data}/RAPID_AMOC/moc_vertical.nc')", "_____no_output_____" ], [ "kwargs = dict(combine='nested', concat_dim='time', decode_times=False)\nds_ctrl = xr.open_mfdataset(f'{path_prace}/MOC/AMOC_sz_yz_ctrl_*.nc', **kwargs)\nds_rcp = xr.open_mfdataset(f'{path_prace}/MOC/AMOC_sz_yz_rcp_*.nc' , **kwargs)\nds_lpd = xr.open_mfdataset(f'{path_prace}/MOC/AMOC_sz_yz_lpd_*.nc' , **kwargs)\nds_lr1 = xr.open_mfdataset(f'{path_prace}/MOC/AMOC_sz_yz_lr1_*.nc' , **kwargs)", "_____no_output_____" ], [ "AMOC_ctrl = xr.open_dataarray(f'{path_results}/MOC/AMOC_max_ctrl.nc', decode_times=False)\nAMOC_rcp = xr.open_dataarray(f'{path_results}/MOC/AMOC_max_rcp.nc' , decode_times=False)\nAMOC_lpd = xr.open_dataarray(f'{path_results}/MOC/AMOC_max_lpd.nc' , decode_times=False)\nAMOC_lr1 = xr.open_dataarray(f'{path_results}/MOC/AMOC_max_lr1.nc' , decode_times=False)", "_____no_output_____" ], [ "mycmap = cmocean.tools.crop_by_percent(cmocean.cm.curl, 100/3, which='min', N=None)\nf = plt.figure(figsize=(6.4,5))\n\n# profiles\nax = f.add_axes([.84,.55,.15,.4])\nax.set_title(r'26.5$\\!^\\circ\\!$N')\nax.set_ylim((-6,0))\nax.set_yticklabels([])\nax.axvline(0, c='k', lw=.5)\nax.axhline(-1, c='k', lw=.5)\nr, = ax.plot(RAPIDz.mean('time'), -RAPIDz.depth/1e3, c='k', label='RAPID')\nRAPID_ctrl = ds_ctrl['AMOC(y,z)'].isel(nlat_u=1456).mean('time')\nRAPID_lpd = ds_lpd ['AMOC(y,z)'].isel(nlat_u= 271).mean('time')\nRAPID_rcp = 365*100*xr_linear_trend(ds_rcp['AMOC(y,z)'].isel(nlat_u=1456)).rename({'dim_0':'z_t'}).assign_coords(z_t=ds_rcp.z_t) + RAPID_ctrl\nRAPID_lr1 = 365*100*xr_linear_trend(ds_lr1['AMOC(y,z)'].isel(nlat_u= 271)).rename({'dim_0':'z_t'}).assign_coords(z_t=ds_lr1.z_t) + RAPID_lpd \nhc, = ax.plot(RAPID_ctrl, -ds_ctrl.z_t/1e5, c='k', ls='--', label='HR CTRL')\nlc, = ax.plot(RAPID_lpd , -ds_lpd .z_t/1e5, c='k', ls=':' , label='LR CTRL')\nhr, = ax.plot(RAPID_rcp , -ds_ctrl.z_t/1e5, c='k', ls='--', lw=.7, label='HR RCP')\nlr, = ax.plot(RAPID_lr1 , -ds_lpd .z_t/1e5, c='k', ls=':' , lw=.7, label='LR RCP')\nax.text(.01,.92, '(c)', transform=ax.transAxes)\nax.set_xlabel('AMOC [Sv]')\nax.legend(handles=[r, hc, lc, hr, lr], fontsize=5, frameon=False, handlelength=2, loc='lower right')\n\nfor i, sim in enumerate(['HIGH', 'LOW']):\n axt = f.add_axes([.1+i*.37,.55,.35,.4])\n axb = f.add_axes([.1+i*.37,.09,.35,.35])\n \n # psi\n axt.set_title(['HR-CESM', 'LR-CESM'][i])\n axt.set_ylim((-6,0))\n axt.set_xlim((-34,60))\n if i==0:\n axt.set_ylabel('depth [km]')\n axb.set_ylabel('AMOC 
at 26.5$\\!^\\circ\\!$N, 1000 m')\n else:\n axt.set_yticklabels([])\n axb.set_yticklabels([])\n \n (ds_mean, ds_trend) = [(ds_ctrl, ds_rcp), (ds_lpd, ds_lr1)][i]\n vmaxm = 25\n vmaxt = 10\n mean = ds_mean['AMOC(y,z)'].mean('time')\n trend = xr_2D_trends(ds_trend['AMOC(y,z)']).rolling(nlat_u=[15,3][i]).mean()*100*365\n Xm,Ym = np.meshgrid(Atl_lats(sim=sim), -1e-5*mean['z_t'].values)\n Xt,Yt = np.meshgrid(Atl_lats(sim=sim), -1e-5*trend['z_t'].values)\n im = axt.contourf(Xm, Ym, mean, cmap=mycmap, levels=np.arange(-8,25,1))\n cs = axt.contour(Xm, Ym, trend, levels=np.arange(-12,3,1),\n cmap='cmo.balance', vmin=-10, vmax=10, linewidths=.5)\n axt.clabel(cs, np.arange(-12,3,2), fmt='%d', fontsize=7)\n axt.text(.01,.92, '('+['a','b'][i]+')', transform=axt.transAxes)\n axt.scatter(26.5,-1, color='w', marker='x')\n \n axt.set_xlabel(r'latitude $\\theta$ [$\\!^{\\!\\circ}\\!$N]')\n \n # time series\n AMOC_c = [AMOC_ctrl, AMOC_lpd][i]\n AMOC_r = [AMOC_rcp , AMOC_lr1][i]\n\n axb.set_xlabel('time [model years]')\n axb.plot(AMOC_c.time/365, AMOC_c, c='C0', alpha=.3, lw=.5)\n axb.plot(AMOC_c.time[60:-60]/365, lowpass(AMOC_c,120)[60:-60], c='C0', label='CTRL')\n \n axb.plot(AMOC_r.time/365-[1800,1500][i], AMOC_r, c='C1', alpha=.3, lw=.5)\n axb.plot(AMOC_r.time[60:-60]/365-[1800,1500][i], lowpass(AMOC_r,120)[60:-60], c='C1', label='RCP')\n axb.plot(AMOC_r.time/365-[1800,1500][i], xr_lintrend(AMOC_r), c='grey', lw=.8, ls='--', label='RCP linear fit')\n axb.text(25+[200,500][i], 5.8, f'{xr_linear_trend(AMOC_r).values*100*365:3.2f} Sv/100yr', color='grey', fontsize=7)\n\n axb.set_ylim((4,29.5))\n if i==0:\n axb.legend(frameon=False, fontsize=8)\n axb.set_xlim([(95,305), (395,605)][i])\n axb.text(.01,.91, '('+['d','e'][i]+')', transform=axb.transAxes)\n \ncax1 = f.add_axes([.88,.12,.02,.3])\nf.colorbar(im, cax=cax1)\ncax1.text(1,-.1,'[Sv]', ha='right', fontsize=7, transform=cax1.transAxes)\ncax1.yaxis.set_ticks_position('left')\n\ncax2 = f.add_axes([.92,.12,.02,.3])\nf.colorbar(cs, cax=cax2)\ncax2.text(-.1,-.1,'[Sv/100yr]', ha='left', fontsize=7, transform=cax2.transAxes)\n# plt.savefig(f'{path_results}/FW-paper/Fig5', dpi=600)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d058c92800f8b98954afe6ef2d276e1d7ae37f6a
5,149
ipynb
Jupyter Notebook
notebooks/alignment.ipynb
Code-128/depth-quality
0a0220f7791f230631544d0720bbd3fa0f636269
[ "MIT" ]
5
2019-12-02T19:46:24.000Z
2020-10-26T18:51:22.000Z
notebooks/alignment.ipynb
Code-128/depth-quality
0a0220f7791f230631544d0720bbd3fa0f636269
[ "MIT" ]
6
2019-03-28T23:38:31.000Z
2022-01-31T16:40:41.000Z
notebooks/alignment.ipynb
Code-128/depth-quality
0a0220f7791f230631544d0720bbd3fa0f636269
[ "MIT" ]
6
2019-03-27T12:44:27.000Z
2022-01-31T16:07:39.000Z
26.678756
129
0.560886
[ [ [ "import depthquality.quality as quality\nimport depthquality.meshes as meshes\nimport pkg_resources\nimport os", "_____no_output_____" ] ], [ [ "A sample of running the horizontal cylinder code through the pipeline, and visualizing it with Meshcat.", "_____no_output_____" ] ], [ [ "folder_name = \"vert_cylinders\"\nrgb_filename = os.path.join(\"..\", \"src\", \"tests\", \"data\", folder_name, \"1.png\")\ncamera_matrix_filename = os.path.join(\"..\", \"src\", \"tests\", \"data\", folder_name, \"camera_matrix.json\")\npointcloud_filename = os.path.join(\"..\", \"src\", \"tests\", \"data\", folder_name, \"1.ply\")\nreference_mesh = meshes.VERTICAL_CYLINDERS", "_____no_output_____" ], [ "aligned_pointcloud, camera_angle = quality.align_pointcloud_to_reference(\n reference_mesh, rgb_filename, camera_matrix_filename, pointcloud_filename, depth_scale=0.001)\n\n# if you want to save the pointcloud to disk and load it in another visualizer\n# quality.save_pointcloud(pointcloud_filename, \"transformed\", aligned_pointcloud)\n\ncropped_pointcloud = quality.clip_pointcloud_to_pattern_area(\n reference_mesh, aligned_pointcloud, depth_scale=0.001)\n\n# if you want to save the pointcloud to disk and load it in another visualizer\n# quality.save_pointcloud(pointcloud_filename, \"cropped\", cropped_pointcloud)\n\nrmse, density = quality.calculate_rmse_and_density(\n ground_truth_mesh=reference_mesh,\n cropped_pointcloud=cropped_pointcloud,\n depth_scale=0.001,\n camera_angle=camera_angle)", "_____no_output_____" ], [ "print(\"RMSE = {}, density= {}\".format(rmse, density))\nprint(camera_angle)", "RMSE = 1.6844201070129325, density= 1.7608021498435062\n[ 0.00533419 -0.11536905 0.99330838]\n" ] ], [ [ "We can use Meshcat to visualize our geometry directly in a Jupyter Notebook.", "_____no_output_____" ] ], [ [ "import meshcat\nimport meshcat.geometry as g\nimport meshcat.transformations as tfms\nimport numpy as np", "_____no_output_____" ], [ "vis = meshcat.Visualizer()\nvis.jupyter_cell()", "You can open the visualizer by visiting the following URL:\nhttp://127.0.0.1:7000/static/\n" ], [ "vis['reference'].set_object(g.ObjMeshGeometry.from_file(reference_mesh.path))\nvis['reference'].set_transform(tfms.scale_matrix(0.001))", "_____no_output_____" ], [ "vis['transformed_cloud'].set_object(\n g.PointCloud(np.asarray(aligned_pointcloud.points).T,\n np.asarray(aligned_pointcloud.colors).T)\n)\n\nvis['cropped_cloud'].set_object(\n g.PointCloud(np.asarray(cropped_pointcloud.points).T,\n np.asarray(cropped_pointcloud.colors).T)\n)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d058c98f9a72720a89d14e0730d1a057ab0fd055
11,668
ipynb
Jupyter Notebook
AlgoTrading/ParameterAnalysis/.ipynb_checkpoints/Optimization_DOGE_day-checkpoint.ipynb
xujunhuii/huobi_Python
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
[ "Apache-2.0" ]
null
null
null
AlgoTrading/ParameterAnalysis/.ipynb_checkpoints/Optimization_DOGE_day-checkpoint.ipynb
xujunhuii/huobi_Python
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
[ "Apache-2.0" ]
null
null
null
AlgoTrading/ParameterAnalysis/.ipynb_checkpoints/Optimization_DOGE_day-checkpoint.ipynb
xujunhuii/huobi_Python
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
[ "Apache-2.0" ]
null
null
null
27.913876
98
0.361759
[ [ [ "%load_ext lab_black\nimport pandas as pd\nimport json\n\n\nf = open(\"histoday_DOGECOIN.json\",)\ndata = json.load(f)\ndata = pd.DataFrame.from_dict(data[\"Data\"][\"Data\"])\ndata = data[[\"time\", \"close\"]]\nf.close()\ndata", "_____no_output_____" ], [ "def get_profit(lower_limit, upper_limit):\n i = 0\n profit = 0\n records = []\n n = 0\n for index, row in data.iterrows():\n day = index\n price = row[\"close\"]\n if i % 2 == 0:\n if row[\"RSI\"] <= lower_limit and n == 0:\n profit -= price\n records.append([\"Buying\", -round(price), round(profit), day])\n n += 1\n i += 1\n else:\n if row[\"RSI\"] >= upper_limit and n >= 0 and profit + price >= 1:\n profit += price\n records.append([\"Selling\", round(price), round(profit), day])\n n -= 1\n i += 1\n return profit\n\n\ndef calculation(data, window_length, lower_limit, upper_limit):\n delta = data[\"close\"].diff()\n up, down = delta.copy(), delta.copy()\n up[up <= 1] = 0\n down[down >= 2] = 0\n roll_up1 = up.ewm(span=window_length).mean()\n roll_down1 = down.abs().ewm(span=window_length).mean()\n RS1 = roll_up1 / roll_down1\n RSI1 = 100.0 - (100.0 / (1.0 + RS1))\n data.loc[:, \"RSI\"] = RSI1\n data.loc[:, \"TimeStamp\"] = pd.to_datetime(data[\"time\"], unit=\"s\")\n data = data.set_index(\"TimeStamp\")\n return get_profit(lower_limit, upper_limit)\n\n\ncalculation(data, 14, 50, 60)", "_____no_output_____" ], [ "%%time\nfrom tqdm import tqdm\noptimizations=[]\nfor window_length in tqdm(range(10, 20)):\n for lower_limit in range(10, 30):\n for upper_limit in range(80, 100):\n profit = calculation(data, window_length, lower_limit,upper_limit)\n optimizations.append([profit, window_length, lower_limit, upper_limit])", "100%|██████████| 10/10 [31:07<00:00, 186.76s/it]" ], [ "optimization_df = pd.DataFrame(\n optimizations, columns=[\"Profit\", \"WindowLength\", \"LowerLimit\", \"UpperLimit\"]\n)\npd.set_option(\"max_rows\", 30)\noptimization_df", "_____no_output_____" ], [ "# optimization_df.to_excel(\"Profit_Optimization_DOGE_day.xlsx\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d058cbcf0540519534518d2e86df34123cf19ae2
98,974
ipynb
Jupyter Notebook
Toast2.2.ipynb
smithrockmaker/PH213
1f5796337c1f17f649532f6ccdfb59f02e397963
[ "MIT" ]
null
null
null
Toast2.2.ipynb
smithrockmaker/PH213
1f5796337c1f17f649532f6ccdfb59f02e397963
[ "MIT" ]
null
null
null
Toast2.2.ipynb
smithrockmaker/PH213
1f5796337c1f17f649532f6ccdfb59f02e397963
[ "MIT" ]
null
null
null
163.323432
67,332
0.861752
[ [ [ "# ENGR 213 Project Demonstration: Toast Falling from Counter\n## Iteration AND slipping of toast\n\nThis is a Jupyter notebook created to explore the utility of notebooks as an engineering/physics tool. As I consider integrating this material into physics and engineering courses I am having a hard time clarifying the outcomes that I seek for the students. It seems plausible that understanding what it would take to implement the 'Toast Project\" in a way which satisfies me would be helpful to indentify those skills and outcomes I hope for.\n\nI hope to do a good job of documentation as I go but intentions are quirky creatures and prone to change:)\n\n### Today's Learning:\n\nI've been working may way through this for a number of days (a couple of hours at a time). I just realized that it's getting very cumbersome to try to keep each upgrade to the model in the same notebook. I'm going to keep this notebook as an object lesson of how that can happen. In the meantime I'm going to rebuild this notebook into three new notebooks that keep each stage of the process independent. A very helpful discovery about my own workflow.....\n\n#### Exporting this document to pdf\n\nThis has not behaved well so far for me. My current most successful strategy is to download the Jupyter notebook as an html file, open in Firefox, print the file - which gives me the option to save the html document as a pdf. This is not terrible but it's not as good as I would like it.\n\nI also have had some luck with downloading as a .tex file and running it through TeXWorks which complains a bit but ultimately gives pretty output. This may be my strategy.", "_____no_output_____" ], [ "## The Problem\n\nThe basic problem is this. When toast teeters off the edge of a standard counter it seems to, remarkably consistently, land 'jelly side' down. I have read that this is no accident and that in fact the solution to the problem is to push the toast when you notice it going off the edge rather than try to stop it. I have done some loose experiments and find this to be empirically true. Now -- can I use basic dynamics and an numerical approach to explore this problem without getting caught up the analytic solution to the problem documented in various places in AJP. \n\nIn the previous notebook I modelled this process assuming that the angular acceleration would be changing as the toast tips over the edge. Experimentally starting with a piece of toast 3/4 of the way off the edge and then releasing it I observe that it rotates about 2𝜋 radians before it hits the floor (a little more perhaps). My basic iteration model predicted 8ish radians which is more than which is only a little over $2\\pi$ radians. Definitely getting closer to my observations. It seems likely that there is a point at which the gravitational forces will make the toast begin to slip 'laterally'. This will increase the moment of inertia and the net torque in ways that are hard to predict intuitively. That is the reason to try and model this more complex process.\n ", "_____no_output_____" ], [ "# Code\n\nThe code starts the same way as the previous model. I will retain the comment block to keep things consistent.\n\nThe following just sets up the plotting, `matplotlib`, and numerical processing, `numpy`, libaries for use by the kernal. 
As is apparently common they are given shorter aliases as `plt` and `np`.\n\nHere are the reference sites for [`matplotlib`](https://matplotlib.org/) and [`numpy`](http://www.numpy.org/)\n\nNote: The plt.rcParams calls are tools for making the plots larger along with the fonts that are used in the labeling. These settings seem to give acceptable plots.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nplt.rcParams[\"figure.figsize\"] = (20,10)\nplt.rcParams.update({'font.size': 22})\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Defining constants\n\nIn this problem it seems prudent to allow for a variety of 'toast' like objects of different materials and sizes. To that end I want to establish a set of constants that describe the particular setting I am considering. Note that I am working in cm and s for convenience which may turn out to be a bad idea. These variables define the physical form of the toast. Modeling parameters will be solicited from the user allowing a different way to change features.\n\ntlength (parallel to table edge - cm) \ntwidth (perpendicular to table edge - cm) \ntthick (yup - cm) \ntdensity (in case it matters - gr/cm<sup>3</sup>) \ncounterheight ( in cm) \ngravity (cm/s<sup>2</sup>) \nanglimit (radians - generally set to $\\pi/2$ but could be different)\n\nIn this model the toast will be slipping which leads to some component of downward velocity as the toast leaves the edge of the table. This leads to a more complex calculation for the time to fall to the floor which depends on the results of the model. That is why the floortime calculation from the previous models has been removed at this stage.\n\nCalculations in this cell are constant regardless of other variations in the problem parameters.\n", "_____no_output_____" ] ], [ [ "tlength = 10.0\ntwidth = 10.0\ntthick = 1.0\ntdensity = 0.45\ncounterheight = 100.0\ngravity = 981.0\nanglimit = np.pi/2\n\nlindensity = tdensity*tlength*tthick # linear density of toast\ntmass = lindensity*twidth # mass of toast\ntinertiacm = tmass*(twidth*twidth)/12.0 # moment of inertia around CM\ntinertiamax = tmass*(twidth*twidth)/3.0 # max moment around edge of 'book'\n\n# debug\n# print (\"Toast mass (gr): %.2f gr\"% (tmass))\n# print (\"Inertia around CM: %.2f gr*cm^2\" % (tinertiacm))\n# print (\"Max inertia around edge: %.2f gr*cm^2\" % (tinertiamax))", "_____no_output_____" ] ], [ [ "## Updated Freebody Diagram\n\nSince I know from experiment that the toast rotates almost exactly a full $2\\pi$ if I start it hanging 3/4 of it's width over the edge that would mean that the rotational velocity when the toast disconnects from the table is around 12 rad/s (since the fall time is abut 0.5 s). The previous notebook and the plots therein suggest that the toast reaches that velocity when it has rotated a little less than 1 radian. \n\nAnalysis of the previous model raises the question of how slipping of the toast might affect the model. This will lead to a more complex model for sure so it seemed prudent to develop a more explicit freebody diagram using CAD software to keep variables clear. 
\n\nSo, here's the freebody diagram with a host of 'new' labels that represent my current thinking.\n\n<img src=\"images/toastFB.png\" alt=\"Freebody Diagram\" title=\"Toast Freebody\" />\n", "_____no_output_____" ], [ "## Rerun from here!!\n\nWhen I wish to rerun this model with different parameters this is where I start....\n\n### Set tstep and numit\n\nTo explore tools for interacting with the python code I am choosing to set the time step (tstep) and the maximum number of iterations (numit) as inputs from the user. This link from [`stackoverflow`](https://stackoverflow.com/questions/20449427/how-can-i-read-inputs-as-numbers) does the best job of explaining how to use the input() command in python 3.x which is the version I am using. This hopefully explains the format of the input statements....\n\n```python\ntstep = float(input(\"Define time step in ms (ms)? \"))\nnumit = int(input(\"How many interations? \"))\noverhang = float(input(\"What is the initial overhang of the toast (% as in 1.0 = 100%)? \"))\ncoeffric = float(input(\"What is the coefficient of friction? \"))\n```\nThe time step (tstep) can be fractions of ms if I want while the number of iterations (numit) but conceptually be an integer (It doesn't make much sense to repeat a process 11.3 times in this context). The overhang is the initial overhang of the toast (no sliding across the table yet) and the coefficient of friction is needed to handle slipping of the toast.", "_____no_output_____" ] ], [ [ "# Solicit model parameters from user.....\ntstep = float(input(\"Define time step in ms (ms)? \"))\nnumit = int(input(\"How many interations? \"))\noverhang = float(input(\"What is the initial overhang of the toast (% as in 1.0 = 100%)? \"))\ncoeffric = float(input(\"What is the coefficient of friction? \"))\nprint(\"Overhang is %.3f and the coefficient of friction is %.2f .\"% (overhang, coeffric))\nprint(\"time step is %.2f ms and the number of iterations is %s.\"% (tstep, numit))\nprint(\"Rerun this cell to change these values and then rerun the calculations. \")", "Define time step in ms (ms)? 3\nHow many interations? 10\nWhat is the initial overhang of the toast (% as in 1.0 = 100%)? 1\nWhat is the coefficient of friction? .2\n" ] ], [ [ "### Set up variable arrays\n\nGetting these arrays set up is a little bit of an iterative process itself. I set up all the arrays I think I need and invariably I find later that I need several others. Some of that process will be hidden so I apologise. I started out this just a giant list of arrays but later decided I needed to group them in a way that would help visualize how they contribute to the calculation. 
Much of this is very similar to the previous model.", "_____no_output_____" ] ], [ [ "# Define variable arrays needed\n# time variables\ncount = np.linspace(0,numit,num=numit+1) # start at 0, go to numit, because it started at there is 1 more element in the array\ncurrenttime = np.full_like(count,0.0) # same size as count will all values = 0 for starters\n\n# moment of inertia variables\ndparallel = np.full_like(count,0.0) # distance to pivot from center of mass (CM)\ntinertianow = np.full_like(count,0.0) # moment of inertia from parallel axis theorem\n\n# rotation variables\nangaccel = np.full_like(count,0.0) # current angular acceleration\nangvel = np.full_like(count,0.0) # current angular velocity\nangpos = np.full_like(count,0.0) # current angular position\ntorqpos = np.full_like(count,0.0) # torque from overhanging 'right' side of toast\ntorqneg = np.full_like(count,0.0) # torque from 'left' side of toast still over the table\ntorqnet = np.full_like(count,0.0) # net torque\n\n# general location of cm variables\nrside = np.full_like(count,0.0) # length of toast hanging out over edge\nlside = np.full_like(count,0.0) # length of toast to left of edge\n\n# torque calculation variables\narmr = np.full_like(count,0.0) # moment arm of overhanging toast\narml = np.full_like(count,0.0) # moment arm of toast left of pivot\nweightr = np.full_like(count,0.0) # weight of overhanging toast acting at armr/2\nweightl = np.full_like(count,0.0) # weight of 'left' side of toast acting at arml/2\n\n# slipping variables\nfriction = np.full_like(count,0.0) # friction at pivot\nlatgforce = np.full_like(count,0.0) # force seeking to slide toast off\nparallelaccel = np.full_like(count,0.0) # acceleration parallel to plane of toast\n\n# These arrays had to be added later as I needed to deal with the toast slipping off the edge\nslipdisplace = np.full_like(count,0.0) # displacement of toast in this interation\nslipposx = np.full_like(count,0.0) # position of CM of toast in x\nslipposy = np.full_like(count,0.0) # position of CM of toast in y\nslipveltot = np.full_like(count,0.0) # total velocity at iteration\nslipvelx = np.full_like(count,0.0) # velocity of CM in x direction\nslipvely = np.full_like(count,0.0) # velocity of CM in y direction\n\n# kinematic coefficients \nquadcoef = np.zeros(3) # needed to invoke the python polynomial roots solver.", "_____no_output_____" ] ], [ [ "### Initialize the arrays....\n\nIn the process of taking my original notebook apart and creating separate notebooks for each model I am finding that I can do this in a more understandable way than I did the first time around. Feel free to look back at the orginal notebook which I abandoned when it got too cumbersome.\n\nEach time I perform a set of calculations I will start by considering where it is now and whether it is slipping or not. That means the next step in the iteration only depends on the previous step and some constants. Because of this I only need to establish the first value in each of the arrays. What is the value of each variable when this process starts. 
Note that all array values except count[] have been set to 0 so any variables whose initial value should be 0 have been commented out.\n\nSee previous models for details of calculating the moment of inertia using the parallel axis theorem.", "_____no_output_____" ] ], [ [ "# Set first term of each variable\n# time variables\n# count : count is aready completely filled from 0 to numit\n# currenttime[0] is already set to 0\n\n# general location of cm variables\nrside[0] = twidth*overhang\nlside[0] = twidth-rside[0]\n\n# torque calculation variables\narmr[0] = rside[0]/2.0\narml[0] = lside[0]/2.0\nweightr[0] = lindensity*rside[0]*gravity # weight of overhang\nweightl[0] = lindensity*lside[0]*gravity # weight over table\n\n# moment of inertia variables\ndparallel[0] = rside[0] - twidth/2. # value changes if slipping\ntinertianow[0] = tinertiacm + tmass*dparallel[0]**2 \n\n# rotation variables\n#angvel[0] is already set to 0\n#angpos[0] is already set to 0\ntorqpos[0] = (overhang*twidth/2)*(tmass*overhang*gravity)\ntorqneg[0] = -((1.0-overhang)*twidth/2)*(tmass*(1.0-overhang)*gravity)\ntorqnet[0] = torqpos[0]+torqneg[0]\nangaccel[0] = torqnet[0]/tinertianow[0]\n\n# slipping variables\n# friction[0] is already set to 0\n# latgforce[0] is already set to 0\n# parallelaccel[0] is already set to 0\n\n# These arrays had to be added later as I needed to deal with the toast slipping off the edge\n# slipdisplace[0] is already set to 0\nslipposx[0] = rside[0]- twidth/2.0 # CM relative to pivot due to overhang\n# slipposy[0] is already set to 0\n# slipveltot[0] is already set to 0\n# slipvelx[0] is already set to 0\n# slipvely[0] is already set to 0\n\n# kinematic coefficients \n# quadcoef[] depend on conditions when toast leaves edge", "_____no_output_____" ] ], [ [ "### ...same calculation but using variables differently.....\n\nI still need to calculate torqpos and torqneg but these will be based on my new nomenclature that tries to make it more explicit how the torques are calculated as well as the normal force on the corner and the friction generated.\n\nOne of the features I have NOT dealt with yet is that the moment of inertia will change once the toast starts sliding. I'm going to let that go for now and merely calculate the normal, friction, and lateral forces on the toast to see at what point it might start to slide. Then I will worry about how to recalculate the moment of inertia after I do a first test. \n\nLook at the analysis section immediately following for discussion of how this process developed......\n\n### When it starts to slip...(initially ignored)\n\nWhen the toast starts to slip things get complicated in a hurry. Perhaps most obviously the pivot point starts to move which means all of the torques and moment arms change as well as the moment of intertia. That will all be sort of straightforward. Tracking the motion of the toast as it slides off the edge seems painful since the acceleration and velocity will be in a slightly different direction with each successive iteration. Yikes.....\n\nRemember that python keeps track of loops and other programming features through the indents in the code. All of this part of the model will need to take place inside the 'if-else' conditional test' part way through the calculation. 
To be more specific it is the 'else' part of the conditional test where all the action has to happen", "_____no_output_____" ] ], [ [ "ndex1 = 0\nwhile (ndex1 < numit) and (angpos[ndex1] < anglimit):\n print(\"iteration: \",ndex1)\n \n # These calculations take place in every iteration regardless of whether it's slipping or not.\n # moment of inertia NOW - ndex1 \n dparallel[ndex1] = rside[ndex1] - twidth/2. # value changes if slipping\n tinertianow[ndex1] = tinertiacm + tmass*dparallel[ndex1]**2 \n \n # torqnet NOW - ndex1\n torqpos[ndex1] = np.cos(angpos[ndex1])*armr[ndex1]*weightr[ndex1]\n torqneg[ndex1] = -np.cos(angpos[ndex1])*arml[ndex1]*weightl[ndex1]\n torqnet[ndex1] = torqpos[ndex1] + torqneg[ndex1]\n \n # angular acceleration NOW -ndex1\n angaccel[ndex1] = torqnet[ndex1]/tinertianow[ndex1] \n \n # NEXT position and velocity after tstep - ndex1+1 \n angvel[ndex1+1] = angvel[ndex1] + angaccel[ndex1]*(tstep/1000.0)\n angpos[ndex1+1] = angpos[ndex1] + angvel[ndex1]*(tstep/1000.0) + 0.5*angaccel[ndex1]*(tstep/1000.0)*(tstep/1000.0)\n currenttime[ndex1+1] = currenttime[ndex1] + tstep\n \n # determine if the toast is slipping\n # calculate normal, friction, and lateral forces NOW - ndex1\n currentnormal = (weightr[ndex1] + weightl[ndex1])*np.cos(angpos[ndex1])\n friction[ndex1] = currentnormal*coeffric\n latgforce[ndex1] = (weightr[ndex1] + weightl[ndex1])*np.sin(angpos[ndex1]) \n parallelaccel[ndex1] = (latgforce[ndex1] - friction[ndex1])/(tmass)\n\n # This is where I have to deal with the toast slipping. When the parallelaccel > 0\n # then the toast is starting to slip. \n \n if parallelaccel[ndex1] < 0.0: # NOT slipping\n parallelaccel[ndex1] = 0.0\n # update variables for next step = ndex1+1\n rside[ndex1+1] = rside[ndex1]\n lside[ndex1+1] = twidth - rside[ndex1+1]\n armr[ndex1+1] = rside[ndex1+1]/2.0\n arml[ndex1+1] = lside[ndex1+1]/2.0\n weightr[ndex1+1] = lindensity*rside[ndex1+1]*gravity # weight of overhang\n weightl[ndex1+1] = lindensity*lside[ndex1+1]*gravity # weight over table\n slipangle = angpos[ndex1+1] # keep updating the slip angle until is starts slipping. 
\n \n else:\n print(\"Toast is slipping!!; ndex1: \", ndex1)\n # determine NEXT sliding velocity - ndex1+1\n slipvelx[ndex1+1] = slipvelx[ndex1] + np.cos(angpos[ndex1])*parallelaccel[ndex1]*tstep/1000.\n slipvely[ndex1+1] = slipvely[ndex1] - np.sin(angpos[ndex1])*parallelaccel[ndex1]*tstep/1000.\n slipveltot[ndex1+1] = np.sqrt(slipvelx[ndex1+1]**2 + slipvely[ndex1+1]**2)\n \n # determine NEXT slid position - ndex1+1\n slipposx[ndex1+1] = slipposx[ndex1] + slipvelx[ndex1+1]*tstep/1000.\n slipposy[ndex1+1] = slipposy[ndex1] + slipvely[ndex1+1]*tstep/1000.\n slipdisplace[ndex1+1] = np.sqrt(slipposx[ndex1+1]**2 + slipposy[ndex1+1]**2)\n \n # find NEXT overhang, this affects the moment of inertia - ndex1+1\n rside[ndex1+1] = rside[ndex1]+slipdisplace[ndex1+1]\n lside[ndex1+1] = twidth - rside[ndex1+1]\n weightr[ndex1+1] = lindensity*rside[ndex1+1]*gravity # weight of overhang\n weightl[ndex1+1] = lindensity*lside[ndex1+1]*gravity # weight over table\n \n # debugging help\n # print(\"lateral accel (cm/s^2) : \", parallelaccel[ndex1])\n # print(\"lateral g force: \", latgforce[ndex1])\n # print(\"currenttime: \", currenttime[ndex1])\n # print(\"velx: %.3f vely %.3f posx %.3f posy %.3f \" % (slipvelx[ndex1],slipvely[ndex1],slipposx[ndex1],slipposy[ndex1]))\n # print(\"slip velocity %.3f slip displacement %.3f \" % (slipveltot[ndex1],slipdisplace[ndex1]))\n # inputcont = input(\"continue?\") \n \n \n # debugging help\n # print(\"Tpos: %.3f Tneg %.3f Ttot %.3f angaccel %.3f \" % (torqpos2[ndex4],torqneg2[ndex4],torqtot[ndex4],angaccel2[ndex4]))\n # print(\"cos(angpos): \", np.cos(angpos2[ndex2]))\n # print(\"pos %.3f pos+ %.3f vel %.3f vel+ %.3f accel %.3f \" % (angpos2[ndex4],angpos2[ndex4+1],angvel2[ndex4],angvel2[ndex4+1],angaccel2[ndex4]))\n # inputcont = input(\"continue?\") \n \n # test for end point of rotation\n \n if angpos[ndex1+1] > (np.pi/2.0):\n ndex1 = ndex1 + 1\n print (\"Got to 90 degrees at ndex1: \", ndex1)\n break # get out of the loop\n \n ndex1 = ndex1 +1 # go to the next time increment\n \nndexfinal = ndex1\nprint(\"final index: \", ndex1)\nprint(\"Tpos: %.3f Tneg %.3f Ttot %.3f angaccel %.3f : torque 0.0 and angaccel 0.0\" % (torqpos[ndex1],torqneg[ndex1],torqnet[ndex1],angaccel[ndex1]))\nprint(\"pos %.3f vel %.3f : angular position 1.55ish\" % (angpos[ndex1],angvel[ndex1]))\nprint(\"Angle at which slipping begins is %.3f radians\" % (slipangle))", "iteration: 0\niteration: 1\niteration: 2\niteration: 3\niteration: 4\niteration: 5\niteration: 6\niteration: 7\niteration: 8\niteration: 9\nfinal index: 10\nTpos: 0.000 Tneg 0.000 Ttot 0.000 angaccel 0.000 : torque 0.0 and angaccel 0.0\npos 0.066 vel 4.413 : angular position 1.55ish\nAngle at which slipping begins is 0.066 radians\n" ] ], [ [ "### Plot lateral g force and friction to see crossover point.....\n\nThis introduces a different plotting requirement. I'm looking to understand where in the process the frictional force falls below the lateral g force resulting in the toast slipping. In previous dual plot I allowed the plot routines to set the scales on the vertical axes internally. Now I need to make sure both variable share the same vertical axis scale so the visual crossover point is in fact what I'm looking for. 
The first time I did this it looked good but because the scales on the left and right weren't the same it was misleading.", "_____no_output_____" ] ], [ [ "plt.plot(currenttime, latgforce, color = \"blue\", label = \"lateral g force\")\nplt.plot(currenttime, friction, color = \"red\", label = \"friction\")\nplt.title(\"Is it slipping?\")\nplt.ylabel(\"force\");\nplt.legend();", "_____no_output_____" ] ], [ [ "### Analysis\n\nThe first time I ran the analysis above with the possibility of slipping I screwed up the cos/sin thing and it started slipping right away. Fixed that and then it began slipping, with a coefficient of friction of 0.4, at the 6th interation (60 ms). I increased the coefficient of friction to 0.8 and it went up to the 8th interation before slipping. This is qualitatively what one would expect. Interestingly if you go back to the rotation speed plot it seems hopeful that if the toast starts to slide around 60-80 ms that would significantly reduce it's rotational velocity as it starts to fall which is what would be consistent with the experimental evidence.\n\nNow I need to go back and build in the impact of the slipping which will be a bit of a pain. The discussion for this is back a few cells.\n\nIt feels like I have the slipping part working appropriately now. If I increase the coefficient of friction the angle at which it starts to slip is higher AND the final angular velocity is higher by a little.", "_____no_output_____" ], [ "### New Drop time\n\nTo get the rotation of the toast I need to calculate the drop time taking into account that because of slipping (and rotation actually) the toast has some downward velocity when it comes off the edge of the counter. That will slightly reduce the drop time.", "_____no_output_____" ] ], [ [ "quadcoef[0] = -gravity/2.0\nquadcoef[2] = counterheight\nquadcoef[1] = slipvely[ndexfinal-1]\n\ndroptime = np.roots(quadcoef)\n\nif droptime[0] > 0.0: # assume 2 roots and only one is positive....could be a problem\n finalrotation = droptime[0]*angvel[ndexfinal]\n timetofloor = droptime[0]\nelse:\n finalrotation = droptime[1]*angvel[ndexfinal]\n timetofloor = droptime[1]\n\nprint(\"Final Report:\")\nprint(\"Final Rotation at Floor (rad): \", finalrotation)\nprint(\"Angular velocity coming off the table (rad/s):\", angvel[ndexfinal])\nprint(\"Time to reach floor (s):\", timetofloor)\nprint(\"Initial overhang (%):\", overhang)\nprint(\"Coefficient of Friction:\", coeffric)\nprint(\"Angle at which slipping started (rad):\", slipangle)\nprint(\"Time until comes off edge (ms): \", currenttime[ndexfinal])\nprint()\nprint()\n\n# debug\nprint(\"coef 0 (t^2): \", quadcoef[0])\nprint(\"coef 1 (t): \", quadcoef[1])\nprint(\"coef 2 (const):\", quadcoef[2])\nprint(\"root 1: \", droptime[0])\nprint(\"root 2: \", droptime[1])", "Final Report:\nFinal Rotation at Floor (rad): 6.387420535890989\nAngular velocity coming off the table (rad/s): 15.015067546165607\nTime to reach floor (s): 0.4254007193941757\nInitial overhang (%): 0.75\nCoefficient of Friction: 0.8\nAngle at which slipping started (rad): 0.7006976411024474\nTime until comes off edge (ms): 150.0\n\n\ncoef 0 (t^2): -490.5\ncoef 1 (t): -26.413422196467003\ncoef 2 (const): 100.0\nroot 1: -0.47925071367851224\nroot 2: 0.4254007193941757\n" ] ], [ [ "### Analysis\n\nResults from this reworked version of the model are quite sensitive to the coefficient of friction. 
With a $\\mu$ of 0.8 the predicted rotation is 6.4 radians but with a $\\mu$ of 0.3 the predicted rotation is roughly $3/2\\pi$ which is less than a full rotation. It strikes me that it would be interesting to print out the angle at which slipping starts since this is a static calculation and could be compared to experiment. Generally though this model produces results that are consistent with observation. \n\nMy next step will be to go back and capture the angle at which slipping happens and see how that compares to some quick experiments. I also adjust the output of the last cell to provide a more useful summary of the outcome.\n\n### Next steps....\n\nWhat feels like the next step is to wrap this calculation in another loop so that I don't have to try out a range of different coefficients of friction. I could then plot the final rotation angle as function of the coefficient of friction for different overhangs. That would produce a fascinating plot....\n\nI also want to try and figure out how hard it would be to animate this thing....hmmmm", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d058ce35b5d1959d675ea4502358e02c589c8987
40,620
ipynb
Jupyter Notebook
Seminar9/Bonus-seminar.ipynb
Omrigan/dl-course
caae747c4178259e3db90993afc2f4738ea7c1a5
[ "MIT" ]
122
2015-03-30T09:31:04.000Z
2022-03-09T10:16:26.000Z
Seminar9/Bonus-seminar.ipynb
v-mk-s/dl-course
caae747c4178259e3db90993afc2f4738ea7c1a5
[ "MIT" ]
1
2017-08-27T22:07:39.000Z
2017-08-27T22:07:39.000Z
Seminar9/Bonus-seminar.ipynb
v-mk-s/dl-course
caae747c4178259e3db90993afc2f4738ea7c1a5
[ "MIT" ]
70
2015-02-13T19:50:45.000Z
2021-01-14T06:09:44.000Z
37.506925
7,396
0.615805
[ [ [ "# Deep learning for Natural Language Processing\n\n\n * Simple text representations, bag of words\n * Word embedding and... not just another word2vec this time\n * 1-dimensional convolutions for text\n * Aggregating several data sources \"the hard way\"\n * Solving ~somewhat~ real ML problem with ~almost~ end-to-end deep learning\n \n\nSpecial thanks to Irina Golzmann for help with technical part.", "_____no_output_____" ], [ "# NLTK\n\nYou will require nltk v3.2 to solve this assignment\n\n__It is really important that the version is 3.2, otherwize russian tokenizer might not work__\n\nInstall/update\n* `sudo pip install --upgrade nltk==3.2`\n* If you don't remember when was the last pip upgrade, `sudo pip install --upgrade pip`\n\nIf for some reason you can't or won't switch to nltk v3.2, just make sure that russian words are tokenized properly with RegeExpTokenizer.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Dataset\n\nEx-kaggle-competition on job salary prediction\n![img](http://www.kdnuggets.com/images/cartoon-data-scientist-salary-negotiation.gif)\n\nOriginal conest - https://www.kaggle.com/c/job-salary-prediction\n\n\n### Download\nGo [here](https://www.kaggle.com/c/job-salary-prediction) and download as usual\n\nCSC cloud: data should already be here somewhere, just poke the nearest instructor.\n\n\n\n\n# What's inside\nDifferent kinds of features:\n* 2 text fields - title and description\n* Categorical fields - contract type, location\n\nOnly 1 binary target whether or not such advertisement contains prohibited materials\n* criminal, misleading, human reproduction-related, etc\n* diving into the data may result in prolonged sleep disorders", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"./Train_rev1.csv\",sep=',')", "_____no_output_____" ], [ "print df.shape, df.SalaryNormalized.mean()\ndf[:5]", " (244768, 12) 34122.5775755\n" ] ], [ [ "# Tokenizing\n\nFirst, we create a dictionary of all existing words.\nAssign each word a number - it's Id", "_____no_output_____" ] ], [ [ "from nltk.tokenize import RegexpTokenizer\nfrom collections import Counter,defaultdict\ntokenizer = RegexpTokenizer(r\"\\w+\")\n\n#Dictionary of tokens\ntoken_counts = Counter()\n\n#All texts\nall_texts = np.hstack([df.FullDescription.values,df.Title.values])\n\n\n#Compute token frequencies\nfor s in all_texts:\n if type(s) is not str:\n continue\n s = s.decode('utf8').lower()\n tokens = tokenizer.tokenize(s)\n for token in tokens:\n token_counts[token] +=1\n", "_____no_output_____" ] ], [ [ "### Remove rare tokens\n\nWe are unlikely to make use of words that are only seen a few times throughout the corpora.\n\nAgain, if you want to beat Kaggle competition metrics, consider doing something better.", "_____no_output_____" ] ], [ [ "#Word frequency distribution, just for kicks\n_=plt.hist(token_counts.values(),range=[0,50],bins=50)", "_____no_output_____" ], [ "#Select only the tokens that had at least 10 occurences in the corpora.\n#Use token_counts.\n\nmin_count = 5\ntokens = <tokens from token_counts keys that had at least min_count occurences throughout the dataset>\n", "_____no_output_____" ], [ "token_to_id = {t:i+1 for i,t in enumerate(tokens)}\nnull_token = \"NULL\"\ntoken_to_id[null_token] = 0\n", "_____no_output_____" ], [ "print \"# Tokens:\",len(token_to_id)\nif len(token_to_id) < 10000:\n print \"Alarm! It seems like there are too few tokens. 
Make sure you updated NLTK and applied correct thresholds -- unless you now what you're doing, ofc\"\nif len(token_to_id) > 100000:\n print \"Alarm! Too many tokens. You might have messed up when pruning rare ones -- unless you know what you're doin' ofc\"", "# Tokens: 44867\n" ] ], [ [ "### Replace words with IDs\nSet a maximum length for titles and descriptions.\n * If string is longer that that limit - crop it, if less - pad with zeros.\n * Thus we obtain a matrix of size [n_samples]x[max_length]\n * Element at i,j - is an identifier of word j within sample i", "_____no_output_____" ] ], [ [ "def vectorize(strings, token_to_id, max_len=150):\n token_matrix = []\n for s in strings:\n if type(s) is not str:\n token_matrix.append([0]*max_len)\n continue\n s = s.decode('utf8').lower()\n tokens = tokenizer.tokenize(s)\n token_ids = map(lambda token: token_to_id.get(token,0), tokens)[:max_len]\n token_ids += [0]*(max_len - len(token_ids))\n token_matrix.append(token_ids)\n\n return np.array(token_matrix)", "_____no_output_____" ], [ "desc_tokens = vectorize(df.FullDescription.values,token_to_id,max_len = 500)\ntitle_tokens = vectorize(df.Title.values,token_to_id,max_len = 15)", "_____no_output_____" ] ], [ [ "### Data format examples\n", "_____no_output_____" ] ], [ [ "print \"Matrix size:\",title_tokens.shape\nfor title, tokens in zip(df.Title.values[:3],title_tokens[:3]):\n print title,'->', tokens[:10],'...'", "Размер матрицы: (244768, 15)\nEngineering Systems Analyst -> [38462 12311 1632 0 0 0 0 0 0 0] ...\nStress Engineer Glasgow -> [19749 41620 5861 0 0 0 0 0 0 0] ...\nModelling and simulation analyst -> [23387 16330 32144 1632 0 0 0 0 0 0] ...\n" ] ], [ [ "__ As you can see, our preprocessing is somewhat crude. Let us see if that is enough for our network __", "_____no_output_____" ], [ "# Non-sequences\n\n\nSome data features are categorical data. E.g. 
location, contract type, company\n\nThey require a separate preprocessing step.", "_____no_output_____" ] ], [ [ "#One-hot-encoded category and subcategory\n\nfrom sklearn.feature_extraction import DictVectorizer\n\ncategories = []\ndata_cat = df[[\"Category\",\"LocationNormalized\",\"ContractType\",\"ContractTime\"]]\n\n\ncategories = [A list of dictionaries {\"category\":category_name, \"subcategory\":subcategory_name} for each data sample]\n\n \n\nvectorizer = DictVectorizer(sparse=False)\ndf_non_text = vectorizer.fit_transform(categories)\ndf_non_text = pd.DataFrame(df_non_text,columns=vectorizer.feature_names_)\n\n\n", "_____no_output_____" ] ], [ [ "# Split data into training and test", "_____no_output_____" ] ], [ [ "#Target variable - whether or not sample contains prohibited material\ntarget = df.is_blocked.values.astype('int32')\n#Preprocessed titles\ntitle_tokens = title_tokens.astype('int32')\n#Preprocessed tokens\ndesc_tokens = desc_tokens.astype('int32')\n#Non-sequences\ndf_non_text = df_non_text.astype('float32')", "_____no_output_____" ], [ "\n#Split into training and test set.\n\n\n#Difficulty selector:\n#Easy: split randomly\n#Medium: split by companies, make sure no company is in both train and test set\n#Hard: do whatever you want, but score yourself using kaggle private leaderboard\n\ntitle_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = <define_these_variables>\n\n", "_____no_output_____" ] ], [ [ "## Save preprocessed data [optional]\n\n* The next tab can be used to stash all the essential data matrices and get rid of the rest of the data.\n * Highly recommended if you have less than 1.5GB RAM left\n* To do that, you need to first run it with save_prepared_data=True, then restart the notebook and only run this tab with read_prepared_data=True.", "_____no_output_____" ] ], [ [ "\nsave_prepared_data = True #save\nread_prepared_data = False #load\n\n#but not both at once\nassert not (save_prepared_data and read_prepared_data)\n\n\nif save_prepared_data:\n print \"Saving preprocessed data (may take up to 3 minutes)\"\n\n import pickle\n with open(\"preprocessed_data.pcl\",'w') as fout:\n pickle.dump(data_tuple,fout)\n with open(\"token_to_id.pcl\",'w') as fout:\n pickle.dump(token_to_id,fout)\n\n print \"done\"\n \nelif read_prepared_data:\n print \"Reading saved data...\"\n \n import pickle\n \n with open(\"preprocessed_data.pcl\",'r') as fin:\n data_tuple = pickle.load(fin)\n title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = data_tuple\n with open(\"token_to_id.pcl\",'r') as fin:\n token_to_id = pickle.load(fin)\n\n\n \n #Re-importing libraries to allow staring noteboook from here\n import pandas as pd\n import numpy as np\n import matplotlib.pyplot as plt\n %matplotlib inline\n\n \n print \"done\"\n ", "_____no_output_____" ] ], [ [ "# Train the monster\n\nSince we have several data sources, our neural network may differ from what you used to work with.\n\n* Separate input for titles\n * cnn+global max or RNN\n* Separate input for description\n * cnn+global max or RNN\n* Separate input for categorical features\n * Few dense layers + some black magic if you want\n \nThese three inputs must be blended somehow - concatenated or added.\n\n* Output: a simple regression task ", "_____no_output_____" ] ], [ [ "#libraries\nimport lasagne\nfrom theano import tensor as T\nimport theano", "/usr/local/lib/python2.7/dist-packages/Theano-0.8.0rc1-py2.7.egg/theano/tensor/signal/downsample.py:5: UserWarning: downsample module has 
been moved to the pool module.\n warnings.warn(\"downsample module has been moved to the pool module.\")\n" ], [ "#3 inputs and a refere output\ntitle_token_ids = T.matrix(\"title_token_ids\",dtype='int32')\ndesc_token_ids = T.matrix(\"desc_token_ids\",dtype='int32')\ncategories = T.matrix(\"categories\",dtype='float32')\ntarget_y = T.vector(\"is_blocked\",dtype='float32')", "_____no_output_____" ] ], [ [ "# NN architecture", "_____no_output_____" ] ], [ [ "title_inp = lasagne.layers.InputLayer((None,title_tr.shape[1]),input_var=title_token_ids)\ndescr_inp = lasagne.layers.InputLayer((None,desc_tr.shape[1]),input_var=desc_token_ids)\ncat_inp = lasagne.layers.InputLayer((None,nontext_tr.shape[1]), input_var=categories)\n", "_____no_output_____" ], [ "# Descriptions\n\n#word-wise embedding. We recommend to start from some 64 and improving after you are certain it works.\n\ndescr_nn = lasagne.layers.EmbeddingLayer(descr_inp,\n input_size=len(token_to_id)+1,\n output_size=?)\n\n\n#reshape from [batch, time, unit] to [batch,unit,time] to allow 1d convolution over time\ndescr_nn = lasagne.layers.DimshuffleLayer(descr_nn, [0,2,1])\n\ndescr_nn = 1D convolution over embedding, maybe several ones in a stack\n\n#pool over time\ndescr_nn = lasagne.layers.GlobalPoolLayer(descr_nn,T.max)\n\n#Possible improvements here are adding several parallel convs with different filter sizes or stacking them the usual way\n#1dconv -> 1d max pool ->1dconv and finally global pool \n\n\n# Titles\ntitle_nn = <Process titles somehow (title_inp)>\n\n# Non-sequences\ncat_nn = <Process non-sequences(cat_inp)>\n", "_____no_output_____" ], [ "nn = <merge three layers into one (e.g. lasagne.layers.concat) > \n\nnn = lasagne.layers.DenseLayer(nn,your_lucky_number)\nnn = lasagne.layers.DropoutLayer(nn,p=maybe_use_me)\nnn = lasagne.layers.DenseLayer(nn,1,nonlinearity=lasagne.nonlinearities.linear)", "_____no_output_____" ] ], [ [ "# Loss function\n\n* The standard way:\n * prediction\n * loss\n * updates\n * training and evaluation functions\n \n", "_____no_output_____" ] ], [ [ "#All trainable params\nweights = lasagne.layers.get_all_params(nn,trainable=True)", "_____no_output_____" ], [ "#Simple NN prediction\nprediction = lasagne.layers.get_output(nn)[:,0]\n\n#loss function\nloss = lasagne.objectives.squared_error(prediction,target_y).mean()\n\n", "_____no_output_____" ], [ "#Weight optimization step\nupdates = <your favorite optimizer>", "_____no_output_____" ] ], [ [ "### Determinitic prediction \n * In case we use stochastic elements, e.g. dropout or noize\n * Compile a separate set of functions with deterministic prediction (deterministic = True)\n * Unless you think there's no neet for dropout there ofc. 
Btw is there?", "_____no_output_____" ] ], [ [ "#deterministic version\ndet_prediction = lasagne.layers.get_output(nn,deterministic=True)[:,0]\n\n#equivalent loss function\ndet_loss = <an excercise in copy-pasting and editing>\n", "_____no_output_____" ] ], [ [ "### Coffee-lation", "_____no_output_____" ] ], [ [ "train_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[loss,prediction],updates = updates)\neval_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[det_loss,det_prediction])", "_____no_output_____" ] ], [ [ "# Training loop\n* The regular way with loops over minibatches\n* Since the dataset is huge, we define epoch as some fixed amount of samples isntead of all dataset", "_____no_output_____" ] ], [ [ "# Out good old minibatch iterator now supports arbitrary amount of arrays (X,y,z)\n\ndef iterate_minibatches(*arrays,**kwargs):\n \n batchsize=kwargs.get(\"batchsize\",100)\n shuffle = kwargs.get(\"shuffle\",True)\n \n if shuffle:\n indices = np.arange(len(arrays[0]))\n np.random.shuffle(indices)\n for start_idx in range(0, len(arrays[0]) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield [arr[excerpt] for arr in arrays]\n \n", "_____no_output_____" ] ], [ [ "### Tweaking guide\n\n* batch_size - how many samples are processed per function call\n * optimization gets slower, but more stable, as you increase it.\n * May consider increasing it halfway through training\n* minibatches_per_epoch - max amount of minibatches per epoch\n * Does not affect training. Lesser value means more frequent and less stable printing\n * Setting it to less than 10 is only meaningfull if you want to make sure your NN does not break down after one epoch\n* n_epochs - total amount of epochs to train for\n * `n_epochs = 10**10` and manual interrupting is still an option\n\n\nTips:\n\n* With small minibatches_per_epoch, network quality may jump up and down for several epochs\n\n* Plotting metrics over training time may be a good way to analyze which architectures work better.\n\n* Once you are sure your network aint gonna crash, it's worth letting it train for a few hours of an average laptop's time to see it's true potential", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error,mean_absolute_error\n\n\nn_epochs = 100\nbatch_size = 100\nminibatches_per_epoch = 100\n\n\nfor i in range(n_epochs):\n \n #training\n epoch_y_true = []\n epoch_y_pred = []\n \n b_c = b_loss = 0\n for j, (b_desc,b_title,b_cat, b_y) in enumerate(\n iterate_minibatches(desc_tr,title_tr,nontext_tr,target_tr,batchsize=batch_size,shuffle=True)):\n if j > minibatches_per_epoch:break\n \n loss,pred_probas = train_fun(b_desc,b_title,b_cat,b_y)\n \n b_loss += loss\n b_c +=1\n \n epoch_y_true.append(b_y)\n epoch_y_pred.append(pred_probas)\n\n \n epoch_y_true = np.concatenate(epoch_y_true)\n epoch_y_pred = np.concatenate(epoch_y_pred)\n \n print \"Train:\"\n print '\\tloss:',b_loss/b_c\n print '\\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5\n print '\\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)\n \n \n #evaluation\n epoch_y_true = []\n epoch_y_pred = []\n b_c = b_loss = 0\n for j, (b_desc,b_title,b_cat, b_y) in enumerate(\n iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):\n if j > minibatches_per_epoch: break\n loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)\n \n b_loss += loss\n b_c 
+=1\n \n epoch_y_true.append(b_y)\n epoch_y_pred.append(pred_probas)\n\n \n epoch_y_true = np.concatenate(epoch_y_true)\n epoch_y_pred = np.concatenate(epoch_y_pred)\n \n print \"Val:\"\n print '\\tloss:',b_loss/b_c\n print '\\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5\n print '\\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)\n", "_____no_output_____" ], [ "print \"If you are seeing this, it's time to backup your notebook. No, really, 'tis too easy to mess up everything without noticing. \"", "If you are seeing this, it's time to backup your notebook. No, really, 'tis too easy to mess up everything without noticing. \n" ] ], [ [ "# Final evaluation\nEvaluate network over the entire test set", "_____no_output_____" ] ], [ [ "#evaluation\nepoch_y_true = []\nepoch_y_pred = []\n\nb_c = b_loss = 0\nfor j, (b_desc,b_title,b_cat, b_y) in enumerate(\n iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):\n loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)\n\n b_loss += loss\n b_c +=1\n\n epoch_y_true.append(b_y)\n epoch_y_pred.append(pred_probas)\n\n\nepoch_y_true = np.concatenate(epoch_y_true)\nepoch_y_pred = np.concatenate(epoch_y_pred)\n\nprint \"Scores:\"\nprint '\\tloss:',b_loss/b_c\nprint '\\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5\nprint '\\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)", "_____no_output_____" ] ], [ [ "Now tune the monster for least MSE you can get!", "_____no_output_____" ], [ "# Next time in our show\n* Recurrent neural networks\n * How to apply them to practical problems?\n * What else can they do?\n * Why so much hype around LSTM?\n* Stay tuned!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d058d29d1ce34525a643fb8e02d48600b58d1080
11,485
ipynb
Jupyter Notebook
slides/dlp03_introduction_to_keras_and_tf.ipynb
codingalzi/dlp
aac2df75ab9d51570dad9a17573987c9b4a89af0
[ "MIT" ]
null
null
null
slides/dlp03_introduction_to_keras_and_tf.ipynb
codingalzi/dlp
aac2df75ab9d51570dad9a17573987c9b4a89af0
[ "MIT" ]
null
null
null
slides/dlp03_introduction_to_keras_and_tf.ipynb
codingalzi/dlp
aac2df75ab9d51570dad9a17573987c9b4a89af0
[ "MIT" ]
null
null
null
22.171815
151
0.491859
[ [ [ "# 3장 케라스와 텐서플로우", "_____no_output_____" ], [ "## 주요 내용\n\n- 딥러닝 필수 요소\n- 케라스와 텐서플로우 간략 소개\n- 텐서플로우, 케라스, GPU를 활용한 딥러닝 작업환경\n- 케라스와 텐서플로우를 이용한 신경망의 핵심 구성요소 구현", "_____no_output_____" ], [ "## 3.1 텐서플로우 소개", "_____no_output_____" ], [ "### 텐서플로우\n\n- 구글을 중심으로 개발된 머신러닝 __플랫폼__(platform)\n - TF-Agents: 강화학습 연구 지원\n - TFX: 머신러닝 프로젝트 진행과정(workflow) 운영 지원\n - TF-Hub: 훈련된 모델 제공\n- 파이썬 기반\n- 텐서 연산 지원", "_____no_output_____" ], [ "### 넘파이(Numpy)와의 차이점\n\n- 미분 가능한 함수들의 그레이디언트 자동 계산\n- GPU, TPU 등 고성능 병렬 하드웨어 가속기 활용 가능\n - 높은 확장성: 일기예보, 바둑 프로그램 등 매우 많은 데이터와 계산이 요구되는 실전 상황에 활용됨.\n- C++(게임), 자바스크립트(웹브라우저), TFLite(모바일 장치) 등 다른 언어가 선호되는 \n 도메인 특화 프로그램에 쉽게 이식 가능", "_____no_output_____" ], [ "## 3.2 케라스", "_____no_output_____" ], [ "### 케라스와 텐서플로우\n\n- 딥러닝 모델 훈련에 최적화된 인터페이스 제공.\n- 원래 텐서플로우와 독립적으로 시작됨.\n- 텐서플로우 2.0부터 텐서플로우 라이브러리의 최상위 프레임워크(framework)로 포함됨.\n- 다양한 워크플로우 제공: 모델 구축과 훈련 방식에 있어서 고수준/저수준 방식 모두 제공", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"https://drek4537l1klr.cloudfront.net/chollet2/v-7/Figures/keras_and_tf.png\" style=\"width:650px;\"></div>\n\n그림 출처: [Deep Learning with Python(Manning MEAP)](https://www.manning.com/books/deep-learning-with-python-second-edition)", "_____no_output_____" ], [ "## 3.3 케라스와 텐서플로우의 약력", "_____no_output_____" ], [ "- 2007년: 씨아노(Theano) 공개. 캐나다 몬트리올 대학교 연구팀.\n - 계산 그래프, 미분 자동화 등을 최초로 활용\n- 2015년 3월: 케라스 라이브러리 공개\n - 씨아노(Theano)를 백앤드로 사용하는 고수준 패키지\n- 2015년 11월: 텐서플로우 라이브러리 공개\n- 2016년: 텐서플로우가 케라스의 기본 백엔드로 지정됨\n- 2017년: 씨아노, 텐서플로우, CNTK(마이크로소프트), MXNet(아마존)이 케라스의 백엔드로 지원됨.\n- 2019년 9월: 텐서플로우 2.0부터 케라스가 텐서플로우의 최상위 프레임워크로 지정됨.", "_____no_output_____" ], [ "## 3.4 딥러닝 작업환경", "_____no_output_____" ], [ "### GPU 활용 옵션\n\n- 개인 NVIDIA 그래픽카드가 장착된 PC 또는 노트북 사용\n - 딥러닝을 많이 활용하는 경우\n - Ubuntu 설치 또는 WSL(Windows Subsystem for Linux) 활용 추천\n- 구글 클라우드 플랫폼 또는 아마존 웹서비스(AWS EC2) 활용\n - 단기간동안 고성능 컴퓨터를 활용하고자 하는 경우\n- __구글 코랩 활용__\n - 강좌 이수 용도로 추천", "_____no_output_____" ], [ "### 구글 코랩 사용\n\n- 기본 사용법은 인터넷 검색 참조\n- 코드 실행에 필요한 추가 패키지 설치는 pip(파이썬 패키지 관리자) 활용\n ```python\n !pip install package_name\n ```\n- 참고: 느낌표(`!`)는 주피터 노트북 코드셀에서 터미널 명령어를 실행하는 경우 사용\n- GPU 활용: 런타임 유형을 GPU로 지정만 하면 됨.\n- TPU 활용: 좀 더 복잡한 세팅 필요. 13장 참조.", "_____no_output_____" ], [ "## 3.5 텐서플로우 기본 사용법", "_____no_output_____" ], [ "### 신경망 모델 훈련 핵심 1\n\n1. 상수 텐서와 변수 텐서\n - 상수 텐서(constant tensor): 입출력 데이터 등 변하지 않는 텐서\n - 변수 텐서(variable): 모델 가중치, 편향 등 업데이트 되는 텐서\n1. 텐서 연산: 덧셈, relu, 점곱 등\n1. 역전파(backpropagation): \n - 손실함수의 그레이디언트 계산 후 모델 가중치 업데이트\n - 그레이디언트 테이프(`GradientTape`) 이용", "_____no_output_____" ], [ "## 3.6 케스의 핵심 API 이해", "_____no_output_____" ], [ "### 신경망 모델 훈련 핵심 2\n\n1. 층(layer)과 모델: 층을 적절하게 쌓아 모델 구성\n1. 손실 함수(loss function): 학습 방향을 유도하는 피드백 역할 수행\n1. 옵티마이저(optimizer): 학습 방향을 정하는 기능 수행\n1. 메트릭(metric): 정확도 등 모델 성능 평가 용도\n1. 
훈련 반복(training loop): 미니 배치 경사하강법 실행", "_____no_output_____" ], [ "### 층(layer)의 역할\n\n- 모델의 상태(지식)로 사용되는 가중치(weight)와 편향(bias) 저장\n- 데이터 표현 변환(forwardd pass)\n- 케라스 활용 딥러닝 모델: 호환 가능한 층들의 적절한 연결", "_____no_output_____" ], [ "#### 층의 종류와 처리 가능 텐서\n\n- `Dense` 클래스를 사용하는 밀집층(dense layer): \n `(샘플수, 특성수)` 모양의 2D 텐서로 제공된 데이터셋\n- `LSTM` 클래스, `Conv1D` 클래스 등을 사용하는 순환층(recurrent layer): \n `(샘플수, 타임스텝수, 특성수)` 모양의 3D 텐서로 제공된 순차 데이터셋\n- `Cons2D` 클래스 등을 사용하는 층: \n `(샘플수, 가로, 세로, 채널수)` 모양의 4D 텐서로 제공된 이미지 데이터셋", "_____no_output_____" ], [ "#### `Layer` 클래스와 `__call__()` 메서드\n\n- 케라스에서 사용되는 모든 층에 대한 부모 클래스\n- `__call__()` 메서드의 역할\n - 가중치와 편향 벡터 생성 및 초기화\n - 입력 데이터를 출력 데이터로 변환", "_____no_output_____" ], [ "#### `__call__()` 메서드의 대략적 정의\n\n```python\ndef __call__(self, inputs):\n if not self.built:\n self.build(inputs.shape)\n self.built = True\nreturn self.call(inputs)\n```", "_____no_output_____" ], [ "- `self.built`: 가중치와 편향 벡터가 초기화가 되어 있는지 여부 기억\n- `self.build(inputs.shape)`: 입력 배치 데이터셋(`inputs`)의 모양(shape) 정보 이용\n - 가중치 텐서 생성 및 무작위적으로 초기화\n - 편향 텐서 생성 및 0벡터로 초기화\n- `self.call(inputs)`: 출력값 계산(forward pass)\n - 아핀 변환 및 활성화 함수 적용", "_____no_output_____" ], [ "### 층에서 모델로\n\n- 입렵값을 보고 바로 입력값의 모양 확인\n- MNIST 모델 사용된 `Dense` 클래스처럼 입력 데이터에 정보 미리 요구하지 않음", "_____no_output_____" ], [ "```python\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nmodel = keras.Sequential([\n layers.SimpleDense(512, activation=\"relu\"),\n layers.SimpleDense(10, activation=\"softmax\")\n])\n```", "_____no_output_____" ], [ "#### 딥러닝 모델\n\n- 층으로 구성된 그래프\n- 예제: `Sequential` 모델\n - 층을 일렬로 쌓은 신경망 제공\n - 아래 층에서 전달한 값을 받아 변환한 후 위 층으로 전달\n- 예제: 트랜스포머(Transformer)", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"https://drek4537l1klr.cloudfront.net/chollet2/v-7/Figures/transformer0001.png\" style=\"width:400px;\"></div>\n\n그림 출처: [Deep Learning with Python(Manning MEAP)](https://www.manning.com/books/deep-learning-with-python-second-edition)", "_____no_output_____" ], [ "#### 망 구성방식과 가설 공간\n\n- 모델의 학습과정은 층을 어떻게 구성하였는가에 전적으로 의존함.\n- 여러 개의 `Dense` 층을 이용한 `Sequential` 모델\n - 아핀 변환,`relu()` 등의 활성화 함수를 연속적으로 적용한 데이터 표현 변환\n- 다른 방식으로 구성된 모델: 다른 방식으로 텐서 표현 변환\n- 이렇듯 층을 구성하는 방식에 따라 텐서들이 가질 수 있는 표현들의 공간이 정해짐.\n- '**망 구성방식(network topology)에 따른 표현 가설 공간(hypothesis space)**'이 지정됨.\n- 신경망의 구성\n - 주어진 데이터셋과 모델의 목적에 따라 결정됨.\n - 특별한 규칙 또는 이론은 없음.\n - 이론 보다는 많은 실습을 통한 경험에 의존", "_____no_output_____" ], [ "### 모델 컴파일\n\n모델의 구조를 정의한 후에 아래 세 가지 설정을 추가로 지정해야 함.\n\n- 옵티마이저(optimizer): 모델의 성능을 향상시키는 방향으로 가중치를 업데이트하는 알고리즘\n- 손실함수(loss function): 훈련 중 모델의 성능 얼마 나쁜가를 측정하는 기준. \n 미분가능이어야 하며 옵티마이저가 경사하강법을 활용하여 손실함숫값을 줄이는 방향으로 작동함.\n- 평가지표(metrics):: 훈련과 테스트 과정을 모니터링 할 때 사용되는 모델 평가 지표. \n 옵티마이저 또는 손실함수와 일반적으로 상관 없음.", "_____no_output_____" ], [ "### `fit()` 메서드 작동법\n\n모델을 훈련시키려면 `fit()` 메서드를 적절한 인자들과 함께 호출해야 함.\n\n- 훈련 세트: 보통 넘파이 어레이 또는 텐서플로우의 `Dataset` 객체 사용\n- 에포크(`epochs`): 전체 훈련 세트를 몇 번 훈련할 지 지정\n- 배치 크기(`batch_size`): 배치 경사하강법에 적용될 배치(묶음) 크기 지정\n\n아래 코드는 앞서 넘파이 어레이로 생성한 (2000, 2) 모양의 양성, 음성 데이터셋을 대상으로 훈련한다. ", "_____no_output_____" ], [ "### 검증 세트 활용\n\n훈련된 모델이 완전히 새로운 데이터에 대해 예측을 잘하는지 여부를 판단하려면\n전체 데이터셋을 훈련 세트와 **검증 세트**로 구분해야 함.\n\n- 훈련 세트: 모델 훈련에 사용되는 데이터셋\n- 검증 세트: 훈련된 모델 평가에 사용되는 데이터셋", "_____no_output_____" ], [ "```python\nmodel.fit(\n training_inputs,\n training_targets,\n epochs=5,\n batch_size=16,\n validation_data=(val_inputs, val_targets)\n)\n```", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d058eef813a442ddc1e2b8fd6188e1cee1899e0a
440,315
ipynb
Jupyter Notebook
LinAlg_Midterms (1).ipynb
adriangalarion/Lab-Activities-1.1
5e5448f79895080c70ba4ceb357cbc1fba7b5e95
[ "Apache-2.0" ]
null
null
null
LinAlg_Midterms (1).ipynb
adriangalarion/Lab-Activities-1.1
5e5448f79895080c70ba4ceb357cbc1fba7b5e95
[ "Apache-2.0" ]
null
null
null
LinAlg_Midterms (1).ipynb
adriangalarion/Lab-Activities-1.1
5e5448f79895080c70ba4ceb357cbc1fba7b5e95
[ "Apache-2.0" ]
null
null
null
363.896694
233,176
0.919683
[ [ [ "<h1>Linear Algebra (CpE210A)", "_____no_output_____" ], [ "<h3>Midterms Project", "_____no_output_____" ], [ "Coded and submitted by:<br>\n<i>Galario, Adrian Q.<br>\n 201814169 <br>\n 58051</i>", "_____no_output_____" ], [ "Directions\nThis Jupyter Notebook will serve as your base code for your Midterm Project. You must further format and provide complete discussion on the given topic. \n- Provide all necessary explanations for specific code blocks. \n- Provide illustrations for key results.\n- Observe clean code (intuitive variable names, proper commenting, proper code spacing)\n- Provide a summary discussion at the end\n\nFailure to use this format or failure to update the document will be given a deduction equivalent to 50% of the original score. ", "_____no_output_____" ], [ "### Case", "_____no_output_____" ], [ "Bebang is back to consult you about her business. Furthering her data analytics initiative she asks you for help to compute some relevant data. Now she is asking you to compute and visualize her sales and costs for the past year. She has given you the datasets attached to her request.", "_____no_output_____" ], [ "### Problem", "_____no_output_____" ], [ "State and explain Bebang's problem here and provide the deliverables.", "_____no_output_____" ], [ "# Proof of Concept", "_____no_output_____" ], [ "Now that you have a grasp on the requirements we need to start with making a program to prove that her problem is solvable. As a Linear Algebra student, we will be focusin on applying vector operations to meet her needs. First, we need to import her data. We will use the `pandas` library for this. For more information you can look into their documentation [here](https://pandas.pydata.org/).", "_____no_output_____" ] ], [ [ "import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\n\n%matplotlib inline", "_____no_output_____" ], [ "df_prices = pd.read_csv(r'C:\\Users\\EyyGiee\\Desktop\\Bebang\\bebang prices.csv')\ndf_sales = pd.read_csv(r'C:\\Users\\EyyGiee\\Desktop\\Bebang\\bebang sales.csv')\n", "_____no_output_____" ], [ "df_prices", "_____no_output_____" ], [ "df_sales", "_____no_output_____" ] ], [ [ "## Part 1: Monthly Sales", "_____no_output_____" ] ], [ [ "sales_mat = np.array(df_sales.set_index('flavor'))\nprices_mat = np.array(df_prices.set_index('Unnamed: 0'))[0] \ncosts_mat = np.array(df_prices.set_index('Unnamed: 0'))[1]\n\nprice_reshaped=np.reshape(prices_mat,(12,1))\ncost_reshaped=np.reshape(costs_mat,(12,1))\n\nprint(sales_mat.shape)\nprint(price_reshaped.shape)\nprint(cost_reshaped.shape)", "(12, 12)\n(12, 1)\n(12, 1)\n" ] ], [ [ "#### Formulas", "_____no_output_____" ], [ "Take note that the fomula for revenue is: <br>\n$revenue = sales * price $ <br>\nIn this case, think that revenue, sales, and price are vectors instead of individual values <br>\nThe formula of cost per item sold is: <br>\n$cost_{sold} = sales * cost$ <br>\nThe formula for profit is: <br>\n$profit = revenue - cost_{sold}$ <br>\nSolving for the monthly profit will be the sum of all profits made on that month.", "_____no_output_____" ] ], [ [ "## Function that returns and prints the monthly sales and profit for each month\ndef monthly_sales(price, cost, sales):\n monthly_revenue = sum(sales*price)\n monthly_costs = sum(sales*cost)\n monthly_profits = (monthly_revenue - monthly_costs)\n return monthly_revenue.flatten(), monthly_costs.flatten(), monthly_profits.flatten()", "_____no_output_____" ], [ "### Using 
the monthly_sales function to compute for the revenue, cost, and profit\n## Then passing the values to month_rev, month_cost, and month_profit\nmonth_rev, month_cost, month_profit = monthly_sales(prices_mat, costs_mat, sales_mat)\n\n### printing the values\nprint(\"Monthly Revenue(Starting from the month of January): \\n\", month_rev)\nprint(\"\\nYearly Revenue: \\n\", sum(month_rev))\nprint(\"\\nMonthly Cost(Starting from the month of January): \\n\", month_cost)\nprint(\"\\nYearly Cost: \\n\", sum(month_cost))\nprint(\"\\nMonthly Profit(Starting from the month of January): \\n\", month_profit)\nprint(\"\\nYearly Profit: \\n\", sum(month_profit))", "Monthly Revenue(Starting from the month of January): \n [216510 116750 84900 26985 208850 17360 18760 19035 12090 22960\n 260775 422010]\n\nYearly Revenue: \n 1426985\n\nMonthly Cost(Starting from the month of January): \n [154650 70050 42450 15420 146195 13454 14070 10575 6045 14350\n 185440 290718]\n\nYearly Cost: \n 963417\n\nMonthly Profit(Starting from the month of January): \n [ 61860 46700 42450 11565 62655 3906 4690 8460 6045 8610\n 75335 131292]\n\nYearly Profit: \n 463568\n" ] ], [ [ "## Part 2: Flavor Sales", "_____no_output_____" ] ], [ [ "## Function that returns and prints the flavor profits for the whole year \ndef flavor_sales(price, cost, sales):\n flavor_revenue = sales*price\n flavor_costs = sales*cost\n flavor_profits = flavor_revenue - flavor_costs \n return flavor_profits.flatten()", "_____no_output_____" ], [ "\n### Using the flavor_sales function to compute for the profit \n## Then passing the values to flavor_profit variable\nflavor_profit = flavor_sales(prices_mat, costs_mat, sales_mat)\n\n## Values of profit of flavors will be inserted here\nflavor1 = []\nflavor2 = []\nflavor3 = []\nflavor4 = []\nflavor5 = []\nflavor6 = []\n\nflavor7 = []\nflavor8 = []\nflavor9 = []\nflavor10 = []\nflavor11 = []\nflavor12 = []\n\n## Loop that will append the values(profit) to their respective variables above \n## The variables above was created so that the sum can be computed by row(to get the yearly profit per flavor)\n## Unlike getting the sum of flavor_profits inside the function flavor_sales, it will get the sum per column(which will get the profit of all flavor per month)\nfor x in flavor_profit:\n if len(flavor1)<=11:\n flavor1.append(x)\n elif len(flavor2)<=11:\n flavor2.append(x)\n elif len(flavor3)<=11:\n flavor3.append(x)\n elif len(flavor4)<=11:\n flavor4.append(x)\n elif len(flavor5)<=11:\n flavor5.append(x)\n elif len(flavor6)<=11:\n flavor6.append(x)\n elif len(flavor7)<=11:\n flavor7.append(x)\n elif len(flavor8)<=11:\n flavor8.append(x)\n elif len(flavor9)<=11:\n flavor9.append(x)\n elif len(flavor10)<=11:\n flavor10.append(x)\n elif len(flavor11)<=11:\n flavor11.append(x)\n elif len(flavor12)<=11:\n flavor12.append(x)\n \n## Profit of each flavor per year \nflavor_profits = np.array([sum(flavor1),sum(flavor2),sum(flavor3),sum(flavor4),sum(flavor5),sum(flavor6),sum(flavor7),sum(flavor8),sum(flavor9),\n sum(flavor10),sum(flavor11),sum(flavor12)])\n\n### Printing the values\nprint(\"The row represents each flavor while the column represents the months\")\nprint(\"The order of flavor and months in rows and columns is the same as in df_sales\\n\")\nprint(\"Profit of Flavor per Month: \\n\", flavor_profit)\nprint(\"\\nThe order of the flavor is the same as in df_sales\\n\")\nprint(\"Flavor Profit per Year: \\n\", flavor_profits)", "The row represents each flavor while the column represents the months\nThe order of flavor 
and months in rows and columns is the same as in df_sales\n\nProfit of Flavor per Month: \n [ 7810 5240 3640 2355 13410 504 890 820 345 1170 3003 12586\n 2100 3240 3440 480 1845 9 30 0 15 90 4056 7672\n 5990 6740 8540 210 7005 702 360 960 1125 1470 10153 22428\n 1240 1120 1140 345 2970 108 320 820 345 540 3900 7924\n 6450 4980 8850 1470 2655 882 430 1920 390 1065 7137 12558\n 2100 3720 3690 1170 2445 234 450 440 810 615 2691 6384\n 9810 3800 1540 210 2205 135 980 660 345 540 7176 8862\n 4780 4620 3540 735 1335 0 10 0 15 120 4355 2996\n 5980 2030 360 1440 11835 36 50 0 120 0 10257 6412\n 3240 1230 480 480 975 666 780 900 1335 1500 8216 3234\n 4580 4120 780 1170 1170 63 30 160 30 15 4199 7896\n 7780 5860 6450 1500 14805 567 360 1780 1170 1485 10192 32340]\n\nThe order of the flavor is the same as in df_sales\n\nFlavor Profit per Year: \n [51773 22977 65683 20772 48787 24749 36263 22506 38520 23036 24213 84289]\n" ], [ "## Putting the list of flavors into array\nflavors = np.array(pd.read_csv(\"bebang sales.csv\", usecols=[0]))\n\n## Converting the arrays into lists\n## Using list is easier to match/zip them\nfprofit_list = flavor_profits.tolist()\nflavor_list = flavors.tolist()\n\n## Matched the two list, to know the profit of each flavor and to be sorted later\nmatched_list = list(zip(fprofit_list, flavor_list))\n\n### Sorting of the flavors by their profit and displaying the first element(flavors) only \nbest_3_flavors = [x[1] for x in sorted(matched_list, reverse=True)]\nworst_3_flavors = [x[1] for x in sorted(matched_list)]\n\n## Printing of the three best and worst flavors\nprint(\"Best Selling Flavors: \\n\", best_3_flavors[0:3])\nprint(\"\\nWorst Selling Flavors: \\n\", worst_3_flavors[0:3])", "Best Selling Flavors: \n [['choco butter naught'], ['sugar glazed'], ['red velvet']]\n\nWorst Selling Flavors: \n [['almond honey'], ['furits and nuts'], ['oreo']]\n" ] ], [ [ "## Part 3: Visualizing the Data (Optional for +40%)\nYou can try to visualize the data in the most comprehensible chart that you can use.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\nimport pandas as pd\nimport csv\n%matplotlib inline", "_____no_output_____" ] ], [ [ "#### Entire Dataset", "_____no_output_____" ] ], [ [ "## Graph for Sales of each flavor\n## Table inside the original file(bebang sales) was transposed in the excel, columns were converted to rows\ndf_sales_Transposed = pd.read_csv(r\"C:\\Users\\EyyGiee\\Desktop\\Bebang\\bebang sales(transpose).csv\")\n\n## Transposing the table makes it easier to plot the data inside it\n## The column header 'flavor' was changed to 'Months'\ndf_sales_Transposed.plot(x=\"Months\", figsize=(25,15))\nplt.title('Sales of Each Flavor')", "_____no_output_____" ], [ "## Graph for Price vs Cost per Flavor\n## Declaring the font size and weight to be used in the graph\nfont = {'weight' : 'bold',\n 'size' : 15}\nmatplotlib.rc('font', **font)\n\n## Declaration of the figure to be used\nfig = plt.figure()\nax = fig.add_axes([0,0,4,4])\nax.set_title('Price vs Cost per Flavor')\n\n## For the legends used in the graph\ncolors = {'Price':'blue', 'Cost':'green'} \nlabels = list(colors.keys())\nhandles = [plt.Rectangle((0,0),1,1, color=colors[label]) for label in labels]\nplt.legend(handles, labels, loc='upper left', prop={'size': 40})\n\n## Plotting of the values for the bar graph\n## Price and cost were plotted in one x-axis per flavor to see the difference between the two variable\nax.bar('Red Velvet' ,prices_mat[0], color = 
'b', width = 0.50)\nax.bar('Red Velvet' ,costs_mat[0], color = 'g', width = 0.50)\n\nax.bar('Oreo' ,prices_mat[1], color = 'b', width = 0.50)\nax.bar('Oreo' ,costs_mat[1], color = 'g', width = 0.50)\n\nax.bar('Super Glazed' ,prices_mat[2], color = 'b', width = 0.50)\nax.bar('Super Glazed' ,costs_mat[2], color = 'g', width = 0.50)\n\nax.bar('Almond Honey' ,prices_mat[3], color = 'b', width = 0.50)\nax.bar('Almond Honey' ,costs_mat[3], color = 'g', width = 0.50)\n\nax.bar('Matcha' ,prices_mat[4], color = 'b', width = 0.50)\nax.bar('Matcha' ,costs_mat[4], color = 'g', width = 0.50)\n\nax.bar('Strawberry Cream' ,prices_mat[5], color = 'b', width = 0.50)\nax.bar('Strawberry Cream' ,costs_mat[5], color = 'g', width = 0.50)\n\nax.bar('Brown \\nSugar Boba' ,prices_mat[6], color = 'b', width = 0.50)\nax.bar('Brown \\nSugar Boba' ,costs_mat[6], color = 'g', width = 0.50)\n\nax.bar('Fruits \\nand Nuts' ,prices_mat[7], color = 'b', width = 0.50)\nax.bar('Fruits \\nand Nuts' ,costs_mat[7], color = 'g', width = 0.50)\n\nax.bar('Dark \\nChocolate' ,prices_mat[8], color = 'b', width = 0.50)\nax.bar('Dark \\nChocolate' ,costs_mat[8], color = 'g', width = 0.50)\n\nax.bar('Chocolate \\nand Orange' ,prices_mat[9], color = 'b', width = 0.50)\nax.bar('Chocolate \\nand Orange' ,costs_mat[9], color = 'g', width = 0.50)\n\nax.bar('Choco Mint' ,prices_mat[10], color = 'b', width = 0.50)\nax.bar('Choco Mint' ,costs_mat[10], color = 'g', width = 0.50)\n\nax.bar('Choco \\nButter Naught' ,prices_mat[11], color = 'b', width = 0.50)\nax.bar('Choco \\nButter Naught' ,costs_mat[11], color = 'g', width = 0.50)", "_____no_output_____" ] ], [ [ "#### Monthly Sales", "_____no_output_____" ] ], [ [ "## Graph for Revenue vs Cost per Month\n## Declaring the font size and weight to be used in the graph\nfont = {'weight' : 'bold',\n 'size' : 15}\nmatplotlib.rc('font', **font)\n\n## Declaration of the figure to be used in the graph\nfig = plt.figure()\nax = fig.add_axes([0,0,4,4])\nax.set_title('Revenue vs Cost per Month')\nax.set_ylabel('Revenue/Cost')\nax.set_xlabel('Months')\n\n## For the legends used in the graph\ncolors = {'Revenue':'blue', 'Cost':'green'} \nlabels = list(colors.keys())\nhandles = [plt.Rectangle((0,0),1,1, color=colors[label]) for label in labels]\nplt.legend(handles, labels, loc='upper left', prop={'size': 40})\n\n## Plotting of the values for the bar graph \n## Revenue and cost were plotted in one x-axis per month to see the difference between the two variable\nax.bar('January' ,month_rev[0], color = 'b', width = 0.50)\nax.bar('January' ,month_cost[0], color = 'g', width = 0.50)\n\nax.bar('February' ,month_rev[1], color = 'b', width = 0.50)\nax.bar('February' ,month_cost[1], color = 'g', width = 0.50)\n\nax.bar('March' ,month_rev[2], color = 'b', width = 0.50)\nax.bar('March' ,month_cost[2], color = 'g', width = 0.50)\n\nax.bar('April' ,month_rev[3], color = 'b', width = 0.50)\nax.bar('April' ,month_cost[3], color = 'g', width = 0.50)\n\nax.bar('May' ,month_rev[4], color = 'b', width = 0.50)\nax.bar('May' ,month_cost[4], color = 'g', width = 0.50)\n\nax.bar('June' ,month_rev[5], color = 'b', width = 0.50)\nax.bar('June' ,month_cost[5], color = 'g', width = 0.50)\n\nax.bar('July' ,month_rev[6], color = 'b', width = 0.50)\nax.bar('July' ,month_cost[6], color = 'g', width = 0.50)\n\nax.bar('August' ,month_rev[7], color = 'b', width = 0.50)\nax.bar('August' ,month_cost[7], color = 'g', width = 0.50)\n\nax.bar('September' ,month_rev[8], color = 'b', width = 0.50)\nax.bar('September' ,month_cost[8], color = 'g', 
width = 0.50)\n\nax.bar('October' ,month_rev[9], color = 'b', width = 0.50)\nax.bar('October' ,month_cost[9], color = 'g', width = 0.50)\n\nax.bar('November' ,month_rev[10], color = 'b', width = 0.50)\nax.bar('November' ,month_cost[10], color = 'g', width = 0.50)\n\nax.bar('December' ,month_rev[11], color = 'b', width = 0.50)\nax.bar('December' ,month_cost[11], color = 'g', width = 0.50)", "_____no_output_____" ], [ "## Graph for profit per month\n## Declaration of the figure to be used\nfig = plt.figure()\nax = fig.add_axes([0,0,3,2])\nax.set_ylabel('Profit')\nax.set_xlabel('Months')\nax.set_title('Profit per Month')\n\n## Declaring the values of each axis\nmonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\nProfits = month_profit\n\n## Declaration of the axes and printing/showing them\nax.bar(months, Profits)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Flavor Sales", "_____no_output_____" ] ], [ [ "## Graph for Flavor profit\n## Declaration of the figure to be used\nfig = plt.figure()\nax = fig.add_axes([0,0,3,2])\nax.set_ylabel('Profit')\nax.set_xlabel('Flavors')\nax.set_title('Flavor Profit')\n\n## Declaring the values of each axis\nflavors = ['Red Velvet', 'Oreo', 'Super \\nGlazed', 'Almond \\nHoney', 'Matcha', 'Strawberry \\nCream', 'Brown \\nSugar Boba', \n 'Fruits \\nand Nuts', 'Dark \\nChocolate', 'Chocolate \\nOrange', 'Choco Mint', 'Choco \\nButter Naught']\nProfits = flavor_profits\n\n## Declaration of the axes and printing/showing them\nax.bar(flavors, Profits)\nplt.show()", "_____no_output_____" ] ], [ [ "## Part 4: Business Recommendation and Conclusion\nPresent the findings of your data analysis and provide recommendations", "_____no_output_____" ], [ "The software reveals that the top three flavors that sell the most for Bebang's business are choco butter naught, matcha, and super glazed, while the worst or bottom three flavors for her business are strawberry cream, oreo, and almond honey after computing and plotting the data from her business. The software also displays the calculated monthly cost, annual cost, yearly revenue, and monthly revenue for all flavors. Bebang makes the most profit in December and the least profit in September, according to the findings. The software also generates Bebang's monthly benefit from all of the flavors. Based on the information gathered, it is suggested that Bebang make a large quantity of the top three best-selling flavors because it will help her company and the expense of producing those flavors will not be wasted and will be recovered through sales. Bebang should also reduce the quantity of the top three worst flavors to help her company avoid spending a lot of money on them without making any profit. Another piece of advice for Bebang is to keep track of the months when the flavors sell best so she can prepare ahead for the amount she'll need to avoid food shortages and excesses. Bebang's company is doing well in general. Bebang's company expenses per flavor are more than offset by the sales she makes. Different marketing techniques can also assist her in growing her company and generating large profits not only for a month, but for the entire year. Bebang could also conduct a customer survey to determine what changes she can make, especially to the flavors that sell the least.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d059258b8ece9f0199c50ae44982406f8be4fdd9
1,741
ipynb
Jupyter Notebook
MergeAllCSV.ipynb
Ashish-Dhage7/Medicine-Review_Scraping-using-python
e02d6e7013999113c6a559da50fef03cdcb884f1
[ "MIT" ]
null
null
null
MergeAllCSV.ipynb
Ashish-Dhage7/Medicine-Review_Scraping-using-python
e02d6e7013999113c6a559da50fef03cdcb884f1
[ "MIT" ]
null
null
null
MergeAllCSV.ipynb
Ashish-Dhage7/Medicine-Review_Scraping-using-python
e02d6e7013999113c6a559da50fef03cdcb884f1
[ "MIT" ]
null
null
null
20.244186
108
0.535325
[ [ [ "#Importing Libraries\nimport os\nimport glob\nimport pandas as pd", "_____no_output_____" ], [ "#Chossing our directory where we put our all csv files\nos.chdir(\"./\")", "_____no_output_____" ], [ "#Here we search our all csv files which are present in our directory and stored it in all_filenames\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]", "_____no_output_____" ], [ "#combine all files in the list\ncombined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])", "_____no_output_____" ], [ "#export to csv\ncombined_csv.to_csv( \"combined_csv.csv\", index=False, encoding='utf-8-sig')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0592a41cb8d02e52431faa97dc0c710cd3012b3
25,496
ipynb
Jupyter Notebook
.ipynb_checkpoints/data_visualization-checkpoint.ipynb
qlongyinqw/gcn-japan-weather-forecast
645f8239b1a913c0b6fc7b7b9ecaf87c0e5e9ea8
[ "MIT" ]
8
2019-05-25T07:16:09.000Z
2022-01-09T19:44:14.000Z
.ipynb_checkpoints/data_visualization-checkpoint.ipynb
qlongyinqw/gcn-japan-weather-forecast
645f8239b1a913c0b6fc7b7b9ecaf87c0e5e9ea8
[ "MIT" ]
null
null
null
.ipynb_checkpoints/data_visualization-checkpoint.ipynb
qlongyinqw/gcn-japan-weather-forecast
645f8239b1a913c0b6fc7b7b9ecaf87c0e5e9ea8
[ "MIT" ]
1
2019-11-26T00:51:34.000Z
2019-11-26T00:51:34.000Z
55.066955
11,452
0.635119
[ [ [ "import pandas as pd\nsensor_metadata = pd.read_csv(\"./data/tokyo/amd_master.tsv\", delimiter=\"\\t\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "/Users/huynguyen/tensorflow-py3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n/Users/huynguyen/tensorflow-py3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n" ], [ "sensor_metadata.head()", "_____no_output_____" ], [ "ax = sns.jointplot(y='lat1', x=\"lng1\", data=sensor_metadata)", "/Users/huynguyen/tensorflow-py3/lib/python3.6/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n" ] ], [ [ "Let's plot one of the Time Series.", "_____no_output_____" ] ], [ [ "from io_utils import load_sensor_data, file_names\ndf = load_sensor_data(file_names[20])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 276048 entries, 2012-01-01 00:10:00 to 2017-03-31 00:00:00\nData columns (total 18 columns):\npr 276020 non-null float64\nf_pr 276048 non-null int64\nmax_ws 275923 non-null float64\nf_max_ws 276048 non-null int64\nave_wv 275917 non-null float64\nf_ave_wv 276048 non-null int64\nave_ws 275917 non-null float64\nf_ave_ws 276048 non-null int64\nmax_tp 275929 non-null float64\nf_max_tp 276048 non-null int64\nmin_tp 275929 non-null float64\nf_min_tp 276048 non-null int64\nsl 275982 non-null float64\nf_sl 276048 non-null int64\nsd 5 non-null float64\nf_sd 276048 non-null int64\ndsd 5 non-null float64\nf_dsd 276048 non-null int64\ndtypes: float64(9), int64(9)\nmemory usage: 40.0 MB\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0592b7b6ba97de0295e46df3b1b7d131ec96862
774
ipynb
Jupyter Notebook
site/1-programming/18-arrayprocessingwithnumpy/arrayprocessingwithnumpy.ipynb
dustykat/p4e
2b77d3b87ff1c7f1f4fddc835a6e6581cdac3c61
[ "CC0-1.0" ]
null
null
null
site/1-programming/18-arrayprocessingwithnumpy/arrayprocessingwithnumpy.ipynb
dustykat/p4e
2b77d3b87ff1c7f1f4fddc835a6e6581cdac3c61
[ "CC0-1.0" ]
null
null
null
site/1-programming/18-arrayprocessingwithnumpy/arrayprocessingwithnumpy.ipynb
dustykat/p4e
2b77d3b87ff1c7f1f4fddc835a6e6581cdac3c61
[ "CC0-1.0" ]
null
null
null
18
43
0.531008
[ [ [ "# array processing with numpy\n- linear algebra using using numpy\n- solve linear system in numpy\n- example\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
d0593360ca9cfd21035913f021ff36dfe85da4e1
12,292
ipynb
Jupyter Notebook
notebooks/03 Web Scraping.ipynb
RaduMihut/titanic
2db742561893fe4a5ad8fea8ef1c58d32fdab093
[ "MIT" ]
null
null
null
notebooks/03 Web Scraping.ipynb
RaduMihut/titanic
2db742561893fe4a5ad8fea8ef1c58d32fdab093
[ "MIT" ]
null
null
null
notebooks/03 Web Scraping.ipynb
RaduMihut/titanic
2db742561893fe4a5ad8fea8ef1c58d32fdab093
[ "MIT" ]
null
null
null
25.502075
358
0.471526
[ [ [ "## Extracting Data using Web Scraping ", "_____no_output_____" ] ], [ [ "# import \nimport requests \nfrom bs4 import BeautifulSoup ", "_____no_output_____" ], [ "# HTML String\nhtml_string = \"\"\"\n<!doctype html>\n<html lang=\"en\">\n<head>\n <title>Doing Data Science With Python</title>\n</head>\n<body>\n <h1 style=\"color:#F15B2A;\">Doing Data Science With Python</h1>\n <p id=\"author\">Author : Abhishek Kumar</p>\n <p id=\"description\">This course will help you to perform various data science activities using python.</p>\n \n <h3 style=\"color:#404040\">Modules</h3>\n <table id=\"module\" style=\"width:100%\">\n <tr>\n <th>Title</th>\n <th>Duration (In Minutes)</th> \n </tr>\n <tr>\n <td>Getting Started</td>\n <td>20</td> \n </tr>\n <tr>\n <td>Setting up the Environment</td>\n <td>40</td> \n </tr>\n <tr>\n <td>Extracting Data</td>\n <td>35</td> \n </tr>\n <tr>\n <td>Exploring and Processing Data - Part 1</td>\n <td>45</td> \n </tr>\n <tr>\n <td>Exploring and Processing Data - Part 2</td>\n <td>45</td> \n </tr>\n <tr>\n <td>Building Predictive Model</td>\n <td>30</td> \n </tr>\n </table>\n</body>\n</html>\n\"\"\"", "_____no_output_____" ], [ "# display HTML string in the juptyer notebook\nfrom IPython.core.display import display, HTML\ndisplay(HTML(html_string))", "_____no_output_____" ], [ "# use beautiful soup \nps = BeautifulSoup(html_string)", "/anaconda3/lib/python3.6/site-packages/bs4/__init__.py:181: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line 193 of the file /anaconda3/lib/python3.6/runpy.py. 
To get rid of this warning, change code that looks like this:\n\n BeautifulSoup(YOUR_MARKUP})\n\nto this:\n\n BeautifulSoup(YOUR_MARKUP, \"lxml\")\n\n markup_type=markup_type))\n" ], [ "# print b\nprint(ps)", "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<title>Doing Data Science With Python</title>\n</head>\n<body>\n<h1 style=\"color:#F15B2A;\">Doing Data Science With Python</h1>\n<p id=\"author\">Author : Abhishek Kumar</p>\n<p id=\"description\">This course will help you to perform various data science activities using python.</p>\n<h3 style=\"color:#404040\">Modules</h3>\n<table id=\"module\" style=\"width:100%\">\n<tr>\n<th>Title</th>\n<th>Duration (In Minutes)</th>\n</tr>\n<tr>\n<td>Getting Started</td>\n<td>20</td>\n</tr>\n<tr>\n<td>Setting up the Environment</td>\n<td>40</td>\n</tr>\n<tr>\n<td>Extracting Data</td>\n<td>35</td>\n</tr>\n<tr>\n<td>Exploring and Processing Data - Part 1</td>\n<td>45</td>\n</tr>\n<tr>\n<td>Exploring and Processing Data - Part 2</td>\n<td>45</td>\n</tr>\n<tr>\n<td>Building Predictive Model</td>\n<td>30</td>\n</tr>\n</table>\n</body>\n</html>\n\n" ], [ "# use name parameter to select by tag name\nbody = ps.find(name=\"body\")", "_____no_output_____" ], [ "print(body)", "<body>\n<h1 style=\"color:#F15B2A;\">Doing Data Science With Python</h1>\n<p id=\"author\">Author : Abhishek Kumar</p>\n<p id=\"description\">This course will help you to perform various data science activities using python.</p>\n<h3 style=\"color:#404040\">Modules</h3>\n<table id=\"module\" style=\"width:100%\">\n<tr>\n<th>Title</th>\n<th>Duration (In Minutes)</th>\n</tr>\n<tr>\n<td>Getting Started</td>\n<td>20</td>\n</tr>\n<tr>\n<td>Setting up the Environment</td>\n<td>40</td>\n</tr>\n<tr>\n<td>Extracting Data</td>\n<td>35</td>\n</tr>\n<tr>\n<td>Exploring and Processing Data - Part 1</td>\n<td>45</td>\n</tr>\n<tr>\n<td>Exploring and Processing Data - Part 2</td>\n<td>45</td>\n</tr>\n<tr>\n<td>Building Predictive Model</td>\n<td>30</td>\n</tr>\n</table>\n</body>\n" ], [ "# use text attribute to get the content of the tag\nprint(body.find(name=\"h1\").text)", "Doing Data Science With Python\n" ], [ "# get first element\nprint(body.find(name=\"p\"))", "<p id=\"author\">Author : Abhishek Kumar</p>\n" ], [ "# get all elements\nprint(body.findAll(name=\"p\"))", "[<p id=\"author\">Author : Abhishek Kumar</p>, <p id=\"description\">This course will help you to perform various data science activities using python.</p>]\n" ], [ "# loop through each element\nfor p in body.findAll(name=\"p\"):\n print(p.text)", "Author : Abhishek Kumar\nThis course will help you to perform various data science activities using python.\n" ], [ "# add attributes in the selection process\nprint(body.find(name='p', attrs={\"id\":\"author\"}))", "<p id=\"author\">Author : Abhishek Kumar</p>\n" ], [ "print(body.find(name='p', attrs={\"id\":\"description\"}))", "<p id=\"description\">This course will help you to perform various data science activities using python.</p>\n" ], [ "# body\nbody = ps.find(name=\"body\")\n# module table\nmodule_table = body.find(name='table', attrs={\"id\": \"module\"})\n# iterate through each row in the table (skipping the first row)\nfor row in module_table.findAll(name='tr')[1:]:\n # module title\n title = row.findAll(name='td')[0].text\n # module duration\n duration = int(row.findAll(name='td')[1].text)\n print title, duration", "Getting Started 20\nSetting up the Environment 40\nExtracting Data 35\nExploring and Processing Data - Part 1 45\nExploring and Processing Data - Part 2 
45\nBuilding Predictive Model 30\n" ], [ "e", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0594051adfa6947bb010e195350c8701e5431b6
628
ipynb
Jupyter Notebook
elyra/kfp/tests/resources/test-bad-notebookA.ipynb
cch96/elyra
7b70ee6195dcc7b7c3661d7970d36e75dfcc5d35
[ "Apache-2.0" ]
1,312
2020-03-10T16:59:46.000Z
2022-03-31T21:30:10.000Z
elyra/kfp/tests/resources/test-bad-notebookA.ipynb
cch96/elyra
7b70ee6195dcc7b7c3661d7970d36e75dfcc5d35
[ "Apache-2.0" ]
2,063
2020-03-10T18:00:57.000Z
2022-03-31T18:02:20.000Z
elyra/kfp/tests/resources/test-bad-notebookA.ipynb
cch96/elyra
7b70ee6195dcc7b7c3661d7970d36e75dfcc5d35
[ "Apache-2.0" ]
242
2020-03-10T08:13:43.000Z
2022-03-28T08:28:06.000Z
17.444444
45
0.515924
[ [ [ "logA = open(\"A.txt\", \"r\").read()\nprint(logA)", "_____no_output_____" ] ] ]
[ "malformed" ]
[ [ "malformed" ] ]
d059527be412dec6e228adc7f82cad1054a89b09
285,324
ipynb
Jupyter Notebook
notebooks/old_5.1_bnlearn.ipynb
knights-lab/foodworks
6dff60e9a9d2fad44b1f95b757422f3cf830c99a
[ "MIT" ]
null
null
null
notebooks/old_5.1_bnlearn.ipynb
knights-lab/foodworks
6dff60e9a9d2fad44b1f95b757422f3cf830c99a
[ "MIT" ]
null
null
null
notebooks/old_5.1_bnlearn.ipynb
knights-lab/foodworks
6dff60e9a9d2fad44b1f95b757422f3cf830c99a
[ "MIT" ]
null
null
null
1,052.856089
279,244
0.945539
[ [ [ "library(bnlearn)\nlibrary(parallel)\n\ndata_numeric = read.table(\"../data/prediction.train.numeric.txt\", header = TRUE, sep=\"\\t\", row.names=1, check.names=F, stringsAsFactors=F)\nblacklist = read.table(\"../data/blacklist.txt\", header=T, sep=\"\\t\", row.names=1)", "_____no_output_____" ], [ "data_binary = read.table(\"../data/prediction.train.binary.txt\", header = TRUE, sep=\"\\t\", row.names=1, check.names=F, stringsAsFactors=T)", "_____no_output_____" ], [ "data_numeric <- as.data.frame(lapply(data_numeric, as.numeric))", "_____no_output_____" ], [ "# data = dedup(data_numeric, threshold=.95)", "_____no_output_____" ], [ "# cnames = sample(colnames(data_numeric), 160)\ncnames = colnames(data_numeric)", "_____no_output_____" ], [ "# dim(data)", "_____no_output_____" ], [ "data_small <- data_numeric[,cnames]", "_____no_output_____" ], [ "data = cbind(dedup(data_small, threshold=.95), data_binary)", "_____no_output_____" ], [ "blacklist_small <- blacklist[blacklist$from %in% colnames(data),]\nblacklist_small <- blacklist_small[blacklist_small$to %in% colnames(data),]", "_____no_output_____" ], [ "print(dim(blacklist_small))", "[1] 37601 2\n" ], [ "# blacklist_small", "_____no_output_____" ], [ "# data_small", "_____no_output_____" ], [ "# data_small = dedup(data_small, threshold=.95)", "_____no_output_____" ], [ "# data", "_____no_output_____" ], [ "learn.dag <- function() {\n cl = makeCluster(40, type = \"SOCK\")\n # dag = si.hiton.pc(data_small, cluster = cl, blacklist = blacklist_small, alpha = 0.05, test=\"mi-cg\")\n# dag = si.hiton.pc(data_small, cluster = cl, blacklist = blacklist_small, alpha=0.05, undirected=T, test=\"cor\")\n dag = si.hiton.pc(data, cluster = cl, blacklist = blacklist_small, alpha=0.05, undirected=T)\n # dag = si.hiton.pc(data_small, blacklist = blacklist_small, alpha = 0.05)\n on.exit(stopCluster(cl))\n return(dag)\n}\n\n\nstart_time <- Sys.time()\ndag <- learn.dag()\nend_time <- Sys.time()\namat.data = amat(dag)\nplot(dag)\nwrite.table(amat.data, \"../results/dag.prediction.train.csv\", sep=\"\\t\")\nprint(end_time - start_time)", "Time difference of 19.14727 mins\n" ], [ "dag", "_____no_output_____" ], [ "dag$nodes", "_____no_output_____" ], [ "# blacklist", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d059585359e06cf1bd5fadd53358a250c09384e0
104,116
ipynb
Jupyter Notebook
Final_file_for_tata_innoverse.ipynb
abhinav090/pothole_detection
9c21213394f417af2f183fcf97da799b93ee2b79
[ "Apache-2.0" ]
null
null
null
Final_file_for_tata_innoverse.ipynb
abhinav090/pothole_detection
9c21213394f417af2f183fcf97da799b93ee2b79
[ "Apache-2.0" ]
null
null
null
Final_file_for_tata_innoverse.ipynb
abhinav090/pothole_detection
9c21213394f417af2f183fcf97da799b93ee2b79
[ "Apache-2.0" ]
null
null
null
33.542526
1,782
0.555467
[ [ [ "<a href=\"https://colab.research.google.com/github/Prady96/Pothole-Detection/blob/avi_testing/Final_file_for_tata_innoverse.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip -V", "pip 19.1.1 from /usr/local/lib/python3.6/dist-packages/pip (python 3.6)\n" ], [ "!python -V", "Python 3.6.7\n" ], [ "!pip install --upgrade youtube-dl", "Collecting youtube-dl\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/45/60/3fc3026b82a3a0e37e6b598260c68abb446e7e85cba04318d2da17a95fac/youtube_dl-2019.5.20-py2.py3-none-any.whl (1.8MB)\n\u001b[K |████████████████████████████████| 1.8MB 9.8MB/s \n\u001b[?25hInstalling collected packages: youtube-dl\nSuccessfully installed youtube-dl-2019.5.20\n" ], [ "!youtube-dl https://drive.google.com/file/d/16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV/view?usp=sharing", "[GoogleDrive] 16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV: Downloading webpage\n[GoogleDrive] 16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV: Requesting source file\n[download] Destination: main_DataSETS_TRAIN.zip-16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV.zip\n\u001b[K[download] 100% of 2.79GiB in 00:21\n" ], [ "!youtube-dl https://drive.google.com/file/d/1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc/view?usp=sharing", "[GoogleDrive] 1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc: Downloading webpage\n[GoogleDrive] 1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc: Requesting source file\n[download] Destination: main_DATASET_VAL.zip-1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc.zip\n\u001b[K[download] 100% of 944.55MiB in 00:07\n" ], [ "!ls", "main_DataSETS_TRAIN.zip-16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV.zip sample_data\nmain_DATASET_VAL.zip-1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc.zip\n" ], [ "!mv main_DATASET_VAL.zip-1rP5tveZgNXJZe_uipJNWUaSqJiow_LGc.zip val.zip", "_____no_output_____" ], [ "!mv main_DataSETS_TRAIN.zip-16-xNP_Ez-3WgFF3vfsP9KJl4ka9hXDlV.zip train.zip", "_____no_output_____" ], [ "!ls", "sample_data train.zip\tval.zip\n" ], [ "!unzip train.zip\n!unzip val.zip", "_____no_output_____" ], [ "!ls", "__MACOSX\t main_DATASET_VAL train.zip\nmain_DataSETS_TRAIN sample_data val.zip\n" ], [ "!rm -rf train.zip\n!rm -rf val.zip\n!mv main_DATASET_VAL/ val\n!mv main_DataSETS_TRAIN/ train", "_____no_output_____" ], [ "!ls", "__MACOSX sample_data train val\n" ], [ "!mkdir customImages", "_____no_output_____" ], [ "!rm -rf sample_data\n!rm -rf __MACOSX", "_____no_output_____" ], [ "!mv train/ customImages/\n!mv val/ customImages/", "_____no_output_____" ], [ "!ls", "customImages\n" ], [ "!git clone https://github.com/matterport/Mask_RCNN.git", "Cloning into 'Mask_RCNN'...\nremote: Enumerating objects: 956, done.\u001b[K\nremote: Total 956 (delta 0), reused 0 (delta 0), pack-reused 956\nReceiving objects: 100% (956/956), 119.39 MiB | 33.09 MiB/s, done.\nResolving deltas: 100% (568/568), done.\n" ], [ "!ls", "customImages Mask_RCNN\n" ], [ "!mv customImages/ Mask_RCNN/", "_____no_output_____" ], [ "%cd Mask_RCNN/", "/content/Mask_RCNN\n" ], [ "!pip install -r requirements.txt", "_____no_output_____" ], [ "%run setup.py install", "WARNING:root:Fail load requirements file, so using default ones.\n" ], [ "!wget https://raw.githubusercontent.com/Prady96/Pothole-Detection/avi_testing/custom.py?token=AHIVHIOGTWT7LA4IIWMEJVS455SIO", "--2019-05-23 05:38:12-- https://raw.githubusercontent.com/Prady96/Pothole-Detection/avi_testing/custom.py?token=AHIVHIOGTWT7LA4IIWMEJVS455SIO\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 
151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 14555 (14K) [text/plain]\nSaving to: ‘custom.py?token=AHIVHIOGTWT7LA4IIWMEJVS455SIO’\n\n\r custom.py 0%[ ] 0 --.-KB/s \rcustom.py?token=AHI 100%[===================>] 14.21K --.-KB/s in 0.003s \n\n2019-05-23 05:38:12 (4.05 MB/s) - ‘custom.py?token=AHIVHIOGTWT7LA4IIWMEJVS455SIO’ saved [14555/14555]\n\n" ], [ "!mv custom.py\\?token\\=AHIVHIOGTWT7LA4IIWMEJVS455SIO custom.py", "_____no_output_____" ], [ "!ls", "assets\t custom.py LICENSE\t mrcnn\t samples\nbuild\t dist\t MANIFEST.in\t README.md\t setup.cfg\ncustomImages images\t mask_rcnn.egg-info requirements.txt setup.py\n" ], [ "!mkdir logs", "_____no_output_____" ], [ "import os\nimport sys\nimport itertools\nimport math\nimport logging\nimport json\nimport re\nimport random\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nfrom matplotlib.patches import Polygon\n\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nfrom mrcnn import visualize\nfrom mrcnn.visualize import display_images\nimport mrcnn.model as modellib\nfrom mrcnn.model import log\n\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\nimport custom\n\n%matplotlib inline", "Using TensorFlow backend.\n" ], [ "config = custom.CustomConfig()\nCUSTOM_DIR = os.path.join(ROOT_DIR, \"customImages\")\nprint(CUSTOM_DIR)", "/content/Mask_RCNN/customImages\n" ], [ "# Load dataset\n# Get the dataset from the releases page\n# https://github.com/matterport/Mask_RCNN/releases\ndataset = custom.CustomDataset()\ndataset.load_custom(CUSTOM_DIR, \"train\")\n\n# Must call before using the dataset\ndataset.prepare()\n\nprint(\"Image Count: {}\".format(len(dataset.image_ids)))\nprint(\"Class Count: {}\".format(dataset.num_classes))\nfor i, info in enumerate(dataset.class_info):\n print(\"{:3}. {:50}\".format(i, info['name']))", "Image Count: 1457\nClass Count: 2\n 0. BG \n 1. damage \n" ], [ "class InferenceConfig(custom.CustomConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\nconfig = InferenceConfig()\nconfig.display()", "\nConfigurations:\nBACKBONE resnet101\nBACKBONE_STRIDES [4, 8, 16, 32, 64]\nBATCH_SIZE 1\nBBOX_STD_DEV [0.1 0.1 0.2 0.2]\nCOMPUTE_BACKBONE_SHAPE None\nDETECTION_MAX_INSTANCES 100\nDETECTION_MIN_CONFIDENCE 0.7\nDETECTION_NMS_THRESHOLD 0.3\nFPN_CLASSIF_FC_LAYERS_SIZE 1024\nGPU_COUNT 1\nGRADIENT_CLIP_NORM 5.0\nIMAGES_PER_GPU 1\nIMAGE_CHANNEL_COUNT 3\nIMAGE_MAX_DIM 1024\nIMAGE_META_SIZE 14\nIMAGE_MIN_DIM 800\nIMAGE_MIN_SCALE 0\nIMAGE_RESIZE_MODE square\nIMAGE_SHAPE [1024 1024 3]\nLEARNING_MOMENTUM 0.9\nLEARNING_RATE 0.001\nLOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}\nMASK_POOL_SIZE 14\nMASK_SHAPE [28, 28]\nMAX_GT_INSTANCES 100\nMEAN_PIXEL [123.7 116.8 103.9]\nMINI_MASK_SHAPE (56, 56)\nNAME damage\nNUM_CLASSES 2\nPOOL_SIZE 7\nPOST_NMS_ROIS_INFERENCE 1000\nPOST_NMS_ROIS_TRAINING 2000\nPRE_NMS_LIMIT 6000\nROI_POSITIVE_RATIO 0.33\nRPN_ANCHOR_RATIOS [0.5, 1, 2]\nRPN_ANCHOR_SCALES (32, 64, 128, 256, 512)\nRPN_ANCHOR_STRIDE 1\nRPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2]\nRPN_NMS_THRESHOLD 0.7\nRPN_TRAIN_ANCHORS_PER_IMAGE 256\nSTEPS_PER_EPOCH 1000\nTOP_DOWN_PYRAMID_SIZE 256\nTRAIN_BN False\nTRAIN_ROIS_PER_IMAGE 200\nUSE_MINI_MASK True\nUSE_RPN_ROIS True\nVALIDATION_STEPS 50\nWEIGHT_DECAY 0.0001\n\n\n" ], [ "##################### MODEL FILE HERE ##################\n### FOR 320 epoch\n!youtube-dl https://drive.google.com/file/d/1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy/view?usp=sharing\n### FOR 160 epoch\n!youtube-dl https://drive.google.com/file/d/1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK/view?usp=sharing", "[GoogleDrive] 1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy: Downloading webpage\n[GoogleDrive] 1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy: Requesting source file\n[download] Destination: mask_rcnn_damage_0320.h5-1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy.h5\n\u001b[K[download] 100% of 244.01MiB in 00:02\n[GoogleDrive] 1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK: Downloading webpage\n[GoogleDrive] 1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK: Requesting source file\n[download] Destination: mask_rcnn_damage_0160.h5-1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK.h5\n\u001b[K[download] 100% of 244.01MiB in 00:02\n" ], [ "!ls", "assets\nbuild\ncustomImages\ncustom.py\ndist\nimages\nLICENSE\nlogs\nMANIFEST.in\nmask_rcnn_damage_0160.h5-1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK.h5\nmask_rcnn_damage_0320.h5-1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy.h5\nmask_rcnn.egg-info\nmrcnn\n__pycache__\nREADME.md\nrequirements.txt\nsamples\nsetup.cfg\nsetup.py\n" ], [ "!mv mask_rcnn_damage_0160.h5-1ex7Mo62j7wugrZbmNFZFAuujd_UguRYK.h5 mask_rcnn_damage_0160.h5", "_____no_output_____" ], [ "!mv mask_rcnn_damage_0160.h5 logs/", "_____no_output_____" ], [ "!ls", "assets\t mask_rcnn_damage_0320.h5-1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy.h5\nbuild\t mask_rcnn.egg-info\ncustomImages mrcnn\ncustom.py __pycache__\ndist\t README.md\nimages\t requirements.txt\nLICENSE samples\nlogs\t setup.cfg\nMANIFEST.in setup.py\n" ], [ "!mv mask_rcnn_damage_0320.h5-1aShefxzQmeB1qerh1Xo2Xkm1SPIy_yzy.h5 mask_rcnn_damage_0320.h5", "_____no_output_____" ], [ "!mv mask_rcnn_damage_0320.h5 logs/", "_____no_output_____" ], [ "!ls logs/", "mask_rcnn_damage_0160.h5 mask_rcnn_damage_0320.h5\n" ], [ "\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(\"logs/mask_rcnn_damage_0320.h5\", by_name=True)", 
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /content/Mask_RCNN/mrcnn/model.py:772: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n" ], [ "class_names = ['BG', 'damage']", "_____no_output_____" ], [ "!pip install utils", "Collecting utils\n Downloading https://files.pythonhosted.org/packages/9b/de/9ffaf89be661b32d1e0cff05e1af5e4fc2d608c47498975e94aca219aed4/utils-0.9.0-py2.py3-none-any.whl\nInstalling collected packages: utils\nSuccessfully installed utils-0.9.0\n" ], [ "import os\nimport sys\nimport custom\nimport utils", "_____no_output_____" ], [ "%cd mrcnn", "/content/Mask_RCNN/mrcnn\n" ], [ "import model as modellib", "_____no_output_____" ], [ "%cd ..", "/content/Mask_RCNN\n" ], [ "import cv2\nimport numpy as np", "_____no_output_____" ], [ "## Testing\nfrom PIL import Image, ImageDraw, ImageFont", "_____no_output_____" ] ], [ [ "MoveOver for Getting Testing Images Similar to S3 Bucket", "_____no_output_____" ] ], [ [ "!youtube-dl https://drive.google.com/file/d/1FTvc361O9BBURgsTMb6dJoE6InAoic_O/view?usp=sharing", "[GoogleDrive] 1FTvc361O9BBURgsTMb6dJoE6InAoic_O: Downloading webpage\n[GoogleDrive] 1FTvc361O9BBURgsTMb6dJoE6InAoic_O: Requesting source file\n[download] Destination: images.zip-1FTvc361O9BBURgsTMb6dJoE6InAoic_O.zip\n\u001b[K[download] 100% of 3.42MiB in 00:02\n" ], [ "!ls", "assets\t\t\t\t\t\t MANIFEST.in\nbuild\t\t\t\t\t\t mask_rcnn.egg-info\ncustomImages\t\t\t\t\t mrcnn\ncustom.py\t\t\t\t\t __pycache__\ndist\t\t\t\t\t\t README.md\nimages\t\t\t\t\t\t requirements.txt\nimages.zip-1FTvc361O9BBURgsTMb6dJoE6InAoic_O.zip samples\nLICENSE\t\t\t\t\t\t setup.cfg\nlogs\t\t\t\t\t\t setup.py\n" ], [ "!mv images.zip-1FTvc361O9BBURgsTMb6dJoE6InAoic_O.zip images.zip", "_____no_output_____" ], [ "!mkdir S3_Images", "_____no_output_____" ], [ "!mv images.zip S3_Images/", "_____no_output_____" ], [ "%cd S3_Images/", "/content/Mask_RCNN/S3_Images\n" ], [ "!ls", "images.zip\n" ], [ "!unzip images.zip", "Archive: images.zip\n creating: images 2/\n inflating: images 2/3a8b1a78-81fa-44a8-8103-58e57b9722f1.mov-0001.jpg \n creating: __MACOSX/\n creating: __MACOSX/images 2/\n inflating: __MACOSX/images 2/._3a8b1a78-81fa-44a8-8103-58e57b9722f1.mov-0001.jpg \n inflating: images 2/img1bb80-lat-28.5557378-long-77.2658383.jpeg \n inflating: __MACOSX/images 2/._img1bb80-lat-28.5557378-long-77.2658383.jpeg \n inflating: images 2/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg \n inflating: __MACOSX/images 2/._3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg \n inflating: images 2/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg \n inflating: __MACOSX/images 2/._03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg \n inflating: images 2/4173.jpg \n inflating: __MACOSX/images 2/._4173.jpg \n inflating: images 2/.DS_Store \n inflating: __MACOSX/images 2/._.DS_Store \n inflating: images 2/171.jpg \n inflating: __MACOSX/images 2/._171.jpg \n inflating: images 2/pgm-bw-115-_145.jpg \n inflating: __MACOSX/images 2/._pgm-bw-115-_145.jpg \n inflating: images 2/0b6149c6-eb02-4fb4-b876-ea2d9c4bffa3.mov-0001.jpg \n inflating: __MACOSX/images 2/._0b6149c6-eb02-4fb4-b876-ea2d9c4bffa3.mov-0001.jpg \n inflating: images 
2/Muroran_20170921100101.jpg \n inflating: __MACOSX/images 2/._Muroran_20170921100101.jpg \n inflating: images 2/IMG_4991.JPG \n inflating: __MACOSX/images 2/._IMG_4991.JPG \n inflating: images 2/img1c5b4-lat-28.5536884-long-77.2669158.jpeg \n inflating: __MACOSX/images 2/._img1c5b4-lat-28.5536884-long-77.2669158.jpeg \n inflating: images 2/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg \n inflating: __MACOSX/images 2/._3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg \n inflating: images 2/5587.jpg \n inflating: __MACOSX/images 2/._5587.jpg \n inflating: images 2/D1Pm2yBUYAAJweG.jpg \n inflating: __MACOSX/images 2/._D1Pm2yBUYAAJweG.jpg \n inflating: images 2/5e40998f-9b81-4027-83b1-0ffa553d22df.mov-0001.jpg \n inflating: __MACOSX/images 2/._5e40998f-9b81-4027-83b1-0ffa553d22df.mov-0001.jpg \n inflating: images 2/test_275.jpg \n inflating: __MACOSX/images 2/._test_275.jpg \n inflating: images 2/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg \n inflating: __MACOSX/images 2/._3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg \n inflating: images 2/D0bXkP7UUAAH2ui.jpg \n inflating: __MACOSX/images 2/._D0bXkP7UUAAH2ui.jpg \n inflating: images 2/5fc8fa99-bf2f-4be6-a111-6fd00d240084.mov-0001.jpg \n inflating: __MACOSX/images 2/._5fc8fa99-bf2f-4be6-a111-6fd00d240084.mov-0001.jpg \n inflating: images 2/\\frame3.jpg \n inflating: __MACOSX/images 2/._\\frame3.jpg \n inflating: images 2/img0d7ac-lat-28.5536884-long-77.2669158.jpeg \n inflating: __MACOSX/images 2/._img0d7ac-lat-28.5536884-long-77.2669158.jpeg \n inflating: images 2/Muroran_20170920113720.jpg \n inflating: __MACOSX/images 2/._Muroran_20170920113720.jpg \n inflating: images 2/img00bd1-lat-28.5536884-long-77.2669158.jpeg \n inflating: __MACOSX/images 2/._img00bd1-lat-28.5536884-long-77.2669158.jpeg \n inflating: images 2/GtooxLZodzIddzZT.jpg \n inflating: __MACOSX/images 2/._GtooxLZodzIddzZT.jpg \n inflating: images 2/1c1d50d9-cc5b-45af-ab5a-54a0d8314b07.mov-0001.jpg \n inflating: __MACOSX/images 2/._1c1d50d9-cc5b-45af-ab5a-54a0d8314b07.mov-0001.jpg \n inflating: images 2/img01aa4-lat-28.5557378-long-77.2658383.jpeg \n inflating: __MACOSX/images 2/._img01aa4-lat-28.5557378-long-77.2658383.jpeg \n inflating: images 2/IMG_6746.JPG \n inflating: __MACOSX/images 2/._IMG_6746.JPG \n inflating: images 2/Adachi_20170911112605.jpg \n inflating: __MACOSX/images 2/._Adachi_20170911112605.jpg \n inflating: images 2/img0ce18-lat-28.5557378-long-77.2658383.jpeg \n inflating: __MACOSX/images 2/._img0ce18-lat-28.5557378-long-77.2658383.jpeg \n inflating: images 2/img1a9e0-lat-28.5557378-long-77.2658383.jpeg \n inflating: __MACOSX/images 2/._img1a9e0-lat-28.5557378-long-77.2658383.jpeg \n inflating: images 2/pgm-bw-112-_98.jpg \n inflating: __MACOSX/images 2/._pgm-bw-112-_98.jpg \n inflating: images 2/3cc9be97-33f6-4494-a4d6-09d85a1c4932.mov-0001.jpg \n inflating: __MACOSX/images 2/._3cc9be97-33f6-4494-a4d6-09d85a1c4932.mov-0001.jpg \n inflating: images 2/4eda22b6-dec4-42f5-8541-6dd3416be2b7.mov-0001.jpg \n inflating: __MACOSX/images 2/._4eda22b6-dec4-42f5-8541-6dd3416be2b7.mov-0001.jpg \n inflating: images 2/1c8c4406-95d5-4dd6-8cd7-8e7b3792204d.mov-0001.jpg \n inflating: __MACOSX/images 2/._1c8c4406-95d5-4dd6-8cd7-8e7b3792204d.mov-0001.jpg \n inflating: images 2/6a3e381c-d20f-483c-831d-4153dd4a7518.mov-0001.jpg \n inflating: __MACOSX/images 2/._6a3e381c-d20f-483c-831d-4153dd4a7518.mov-0001.jpg \n inflating: images 2/2f2d5006-7dd7-403d-a3ce-f2a50b535977.mov-0001.jpg \n inflating: __MACOSX/images 
2/._2f2d5006-7dd7-403d-a3ce-f2a50b535977.mov-0001.jpg \n inflating: images 2/IMG_9567.JPG \n inflating: __MACOSX/images 2/._IMG_9567.JPG \n inflating: images 2/img0bfa5-lat-28.5557378-long-77.2658383.jpeg \n inflating: __MACOSX/images 2/._img0bfa5-lat-28.5557378-long-77.2658383.jpeg \n inflating: images 2/IMG_4453.JPG \n inflating: __MACOSX/images 2/._IMG_4453.JPG \n inflating: images 2/potholes-3-greenfield-rd-psweeting.jpg \n inflating: __MACOSX/images 2/._potholes-3-greenfield-rd-psweeting.jpg \n inflating: images 2/351.jpg \n inflating: __MACOSX/images 2/._351.jpg \n inflating: images 2/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg \n inflating: __MACOSX/images 2/._1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg \n" ], [ "!ls", "'images 2' images.zip __MACOSX\n" ], [ "!rm -rf images.zip\n!rm -rf __MACOSX/", "_____no_output_____" ], [ "!mv images\\ 2 images", "_____no_output_____" ], [ "!ls", "images\n" ], [ "!ls images", " 03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\n 0b6149c6-eb02-4fb4-b876-ea2d9c4bffa3.mov-0001.jpg\n 171.jpg\n 1c1d50d9-cc5b-45af-ab5a-54a0d8314b07.mov-0001.jpg\n 1c8c4406-95d5-4dd6-8cd7-8e7b3792204d.mov-0001.jpg\n 1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\n 2f2d5006-7dd7-403d-a3ce-f2a50b535977.mov-0001.jpg\n 351.jpg\n 3a8b1a78-81fa-44a8-8103-58e57b9722f1.mov-0001.jpg\n 3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\n 3cc9be97-33f6-4494-a4d6-09d85a1c4932.mov-0001.jpg\n 3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\n 3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\n 4173.jpg\n 4eda22b6-dec4-42f5-8541-6dd3416be2b7.mov-0001.jpg\n 5587.jpg\n 5e40998f-9b81-4027-83b1-0ffa553d22df.mov-0001.jpg\n 5fc8fa99-bf2f-4be6-a111-6fd00d240084.mov-0001.jpg\n 6a3e381c-d20f-483c-831d-4153dd4a7518.mov-0001.jpg\n Adachi_20170911112605.jpg\n D0bXkP7UUAAH2ui.jpg\n D1Pm2yBUYAAJweG.jpg\n'\\frame3.jpg'\n GtooxLZodzIddzZT.jpg\n img00bd1-lat-28.5536884-long-77.2669158.jpeg\n img01aa4-lat-28.5557378-long-77.2658383.jpeg\n img0bfa5-lat-28.5557378-long-77.2658383.jpeg\n img0ce18-lat-28.5557378-long-77.2658383.jpeg\n img0d7ac-lat-28.5536884-long-77.2669158.jpeg\n img1a9e0-lat-28.5557378-long-77.2658383.jpeg\n img1bb80-lat-28.5557378-long-77.2658383.jpeg\n img1c5b4-lat-28.5536884-long-77.2669158.jpeg\n IMG_4453.JPG\n IMG_4991.JPG\n IMG_6746.JPG\n IMG_9567.JPG\n Muroran_20170920113720.jpg\n Muroran_20170921100101.jpg\n pgm-bw-112-_98.jpg\n pgm-bw-115-_145.jpg\n potholes-3-greenfield-rd-psweeting.jpg\n test_275.jpg\n" ], [ "!pwd", "/content/Mask_RCNN/S3_Images\n" ], [ "%cd /content/Mask_RCNN/", "/content/Mask_RCNN\n" ], [ "!ls /content/Mask_RCNN/S3_Images/images/", " 03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\n 0b6149c6-eb02-4fb4-b876-ea2d9c4bffa3.mov-0001.jpg\n 171.jpg\n 1c1d50d9-cc5b-45af-ab5a-54a0d8314b07.mov-0001.jpg\n 1c8c4406-95d5-4dd6-8cd7-8e7b3792204d.mov-0001.jpg\n 1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\n 2f2d5006-7dd7-403d-a3ce-f2a50b535977.mov-0001.jpg\n 351.jpg\n 3a8b1a78-81fa-44a8-8103-58e57b9722f1.mov-0001.jpg\n 3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\n 3cc9be97-33f6-4494-a4d6-09d85a1c4932.mov-0001.jpg\n 3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\n 3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\n 4173.jpg\n 4eda22b6-dec4-42f5-8541-6dd3416be2b7.mov-0001.jpg\n 5587.jpg\n 5e40998f-9b81-4027-83b1-0ffa553d22df.mov-0001.jpg\n 5fc8fa99-bf2f-4be6-a111-6fd00d240084.mov-0001.jpg\n 6a3e381c-d20f-483c-831d-4153dd4a7518.mov-0001.jpg\n Adachi_20170911112605.jpg\n D0bXkP7UUAAH2ui.jpg\n D1Pm2yBUYAAJweG.jpg\n'\\frame3.jpg'\n 
GtooxLZodzIddzZT.jpg\n img00bd1-lat-28.5536884-long-77.2669158.jpeg\n img01aa4-lat-28.5557378-long-77.2658383.jpeg\n img0bfa5-lat-28.5557378-long-77.2658383.jpeg\n img0ce18-lat-28.5557378-long-77.2658383.jpeg\n img0d7ac-lat-28.5536884-long-77.2669158.jpeg\n img1a9e0-lat-28.5557378-long-77.2658383.jpeg\n img1bb80-lat-28.5557378-long-77.2658383.jpeg\n img1c5b4-lat-28.5536884-long-77.2669158.jpeg\n IMG_4453.JPG\n IMG_4991.JPG\n IMG_6746.JPG\n IMG_9567.JPG\n Muroran_20170920113720.jpg\n Muroran_20170921100101.jpg\n pgm-bw-112-_98.jpg\n pgm-bw-115-_145.jpg\n potholes-3-greenfield-rd-psweeting.jpg\n test_275.jpg\n" ], [ "%cd /content/Mask_RCNN/S3_Images/images/", "/content/Mask_RCNN/S3_Images/images\n" ], [ "!pip install python-resize-image", "Collecting python-resize-image\n Downloading https://files.pythonhosted.org/packages/c7/b5/01e49796187415278796d5c64f8fff750a2e27765155be20876dffaabce3/python_resize_image-1.1.18-py2.py3-none-any.whl\nRequirement already satisfied: requests>=2.19.1 in /usr/local/lib/python3.6/dist-packages (from python-resize-image) (2.21.0)\nCollecting Pillow>=5.1.0 (from python-resize-image)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d2/c2/f84b1e57416755e967236468dcfb0fad7fd911f707185efc4ba8834a1a94/Pillow-6.0.0-cp36-cp36m-manylinux1_x86_64.whl (2.0MB)\n\u001b[K |████████████████████████████████| 2.0MB 13.2MB/s \n\u001b[?25hRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.1->python-resize-image) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.1->python-resize-image) (2019.3.9)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.1->python-resize-image) (1.24.3)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.1->python-resize-image) (2.8)\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\nInstalling collected packages: Pillow, python-resize-image\n Found existing installation: Pillow 4.3.0\n Uninstalling Pillow-4.3.0:\n Successfully uninstalled Pillow-4.3.0\nSuccessfully installed Pillow-6.0.0 python-resize-image-1.1.18\n" ], [ "from PIL import Image\nimport os\nfrom resizeimage import resizeimage\n\ncount = 0\n\nfor f in os.listdir(os.getcwd()):\n f_name, f_ext = os.path.splitext(f)\n# f_random, f_lat_name,f_lat_val,f_long_name,f_long_val = f_name.split('-')\n# f_lat_val = f_lat_val.strip() ##removing the white Space\n# f_long_val = f_long_val.strip()\n# new_name = '{}-{}-{}.jpg'.format(f_lat_val,f_long_val,count)\n try:\n with Image.open(f) as image:\n count +=1\n cover = resizeimage.resize_cover(image, [600,600])\n cover.save('{}{}'.format(f_name,f_ext),image.format)\n #os.remove(f)\n print(count)\n except(OSError) as e:\n print('Bad Image {}{}'.format(f,count))", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\nBad Image .DS_Store32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n" ], [ "%cd /content/Mask_RCNN/", "/content/Mask_RCNN\n" ], [ "!wget https://github.com/Prady96/IITM_PythonTraining/blob/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf?raw=true", "--2019-05-23 05:40:52-- https://github.com/Prady96/IITM_PythonTraining/blob/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf?raw=true\nResolving 
github.com (github.com)... 140.82.118.4\nConnecting to github.com (github.com)|140.82.118.4|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://github.com/Prady96/IITM_PythonTraining/raw/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf [following]\n--2019-05-23 05:40:53-- https://github.com/Prady96/IITM_PythonTraining/raw/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/Prady96/IITM_PythonTraining/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf [following]\n--2019-05-23 05:40:53-- https://raw.githubusercontent.com/Prady96/IITM_PythonTraining/master/ImageWorking_add_textInImage/fonts_Dir/OpenSans-Bold.ttf\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 224592 (219K) [application/octet-stream]\nSaving to: ‘OpenSans-Bold.ttf?raw=true’\n\nOpenSans-Bold.ttf?r 100%[===================>] 219.33K --.-KB/s in 0.02s \n\n2019-05-23 05:40:53 (13.9 MB/s) - ‘OpenSans-Bold.ttf?raw=true’ saved [224592/224592]\n\n" ], [ "!mv OpenSans-Bold.ttf?raw=true OpenSans-Bold.ttf", "_____no_output_____" ], [ "!ls", "assets\t dist MANIFEST.in\t __pycache__\t samples\nbuild\t images mask_rcnn.egg-info README.md\t setup.cfg\ncustomImages LICENSE mrcnn\t\t requirements.txt setup.py\ncustom.py logs OpenSans-Bold.ttf S3_Images\n" ], [ "# Main file for the file iteration\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nmyList = [] ## area list\nclassList = [] ##class Id List\n\ndef random_colors(N):\n np.random.seed(1)\n colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"apply mask to image\"\"\"\n for n, c in enumerate(color):\n image[:, :, n] = np.where(\n mask == 1,\n image[:, :, n] * (1 - alpha) + alpha * c,\n image[:, :, n]\n )\n return image\n\n\ndef display_instances(image, boxes, masks, ids, names, scores):\n \"\"\"\n take the image and results and apply the mask, box, and Label\n \"\"\"\n n_instances = boxes.shape[0]\n colors = random_colors(n_instances)\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i, color in enumerate(colors):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = apply_mask(image, mask, color)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\n )\n\n return image\n\n\ndef save_image(image, image_name, boxes, masks, class_ids, scores, class_names, filter_classs_names=None,\n scores_thresh=0.1, save_dir=None, mode=0):\n \"\"\"\n image: image array\n image_name: image name\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [num_instances, height, width]\n class_ids: [num_instances]\n scores: confidence scores for each box\n class_names: list of class names of the dataset\n filter_classs_names: (optional) list of class names we want 
to draw\n scores_thresh: (optional) threshold of confidence scores\n save_dir: (optional) the path to store image\n mode: (optional) select the result which you want\n mode = 0 , save image with bbox,class_name,score and mask;\n mode = 1 , save image with bbox,class_name and score;\n mode = 2 , save image with class_name,score and mask;\n mode = 3 , save mask with black background;\n \"\"\"\n mode_list = [0, 1, 2, 3]\n assert mode in mode_list, \"mode's value should in mode_list %s\" % str(mode_list)\n\n if save_dir is None:\n save_dir = os.path.join(os.getcwd(), \"output\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n useful_mask_indices = []\n\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n return\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n for i in range(N):\n # filter\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n if score is None or score < scores_thresh:\n continue\n\n label = class_names[class_id]\n if (filter_classs_names is not None) and (label not in filter_classs_names):\n continue\n\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n\n useful_mask_indices.append(i)\n\n if len(useful_mask_indices) == 0:\n print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n return\n\n colors = random_colors(len(useful_mask_indices))\n\n if mode != 3:\n masked_image = image.astype(np.uint8).copy()\n else:\n masked_image = np.zeros(image.shape).astype(np.uint8)\n\n if mode != 1:\n for index, value in enumerate(useful_mask_indices):\n masked_image = apply_mask(masked_image, masks[:, :, value], colors[index])\n\n masked_image = Image.fromarray(masked_image)\n\n if mode == 3:\n masked_image.save(os.path.join(save_dir, '%s.jpg' % (image_name)))\n return\n\n draw = ImageDraw.Draw(masked_image)\n colors = np.array(colors).astype(int) * 255\n\n myList = []\n countClassIds = 0\n \n for index, value in enumerate(useful_mask_indices):\n class_id = class_ids[value]\n print('class_id value is {}'.format(class_id))\n if class_id == 1:\n countClassIds += 1\n print('counter for the class ID {}'.format(countClassIds))\n \n \n score = scores[value]\n label = class_names[class_id]\n\n y1, x1, y2, x2 = boxes[value]\n \n# myList = []\n \n ## area of the rectangle\n yVal = y2 - y1\n xVal = x2 - x1\n area = xVal * yVal\n print('area is {}'.format(area))\n myList.append(area)\n \n if mode != 2:\n color = tuple(colors[index])\n draw.rectangle((x1, y1, x2, y2), outline=color)\n\n # Label\n# font = ImageFont.load('/usr/share/fonts/truetype/ttf-bitstream-vera/Vera.ttf')\n font = ImageFont.truetype('OpenSans-Bold.ttf', 15)\n draw.text((x1, y1), \"%s %f\" % (label, score), (255, 255, 255), font)\n\n print(r['class_ids'], r['scores'])\n print(myList)\n# print('value of r is {}'.format(r))\n print('image_name is {}'.format(image_name))\n\n image_name = os.path.basename(image_name)\n print('image name is {}'.format(image_name))\n\n f_name, f_ext = os.path.splitext(image_name)\n #f_lat_val,f_long_val,f_count = f_name.split('-')\n\n #f_lat_val = f_lat_val.strip() ##removing the white Space\n #f_long_val = f_long_val.strip()\n\n# new_name = '{}-{}-{}.jpg'.format(f_lat_val,f_long_val,count)\n# print([area for area in myList if ])\n# print([i for i in range(countClassIds) ])\n \n print(\"avi96 {}\".format(myList[:countClassIds]))\n# myList.pop(countClassIds - 1)\n \n new_name = '{}-{}.jpg'.format(myList, 
r['scores'])\n# masked_image.save(os.path.join(save_dir, '%s.jpg' % (image_name)))\n print(\"New Name file is {}\".format(new_name))\n print('save_dir is {}'.format(save_dir))\n masked_image.save(os.path.join(save_dir, '%s' % (new_name)))\n print('file Saved {}'.format(new_name))\n# os.rename(image_name, new_name)\n\n\n\nif __name__ == '__main__':\n \"\"\"\n test everything\n \"\"\"\n import os\n import sys\n import custom\n import utils\n import model as modellib\n #import visualize\n\n # We use a K80 GPU with 24GB memory, which can fit 3 images.\n batch_size = 3\n\n ROOT_DIR = os.getcwd()\n MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n VIDEO_DIR = os.path.join(ROOT_DIR, \"videos\")\n VIDEO_SAVE_DIR = os.path.join(VIDEO_DIR, \"save\")\n# COCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_damage_0010.h5\")\n# if not os.path.exists(COCO_MODEL_PATH):\n# utils.download_trained_weights(COCO_MODEL_PATH)\n\n class InferenceConfig(custom.CustomConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = batch_size\n\n config = InferenceConfig()\n config.display()\n\n model = modellib.MaskRCNN(\n mode=\"inference\", model_dir=MODEL_DIR, config=config\n )\n model.load_weights(\"logs/mask_rcnn_damage_0160.h5\", by_name=True)\n class_names = [\n 'BG', 'damage'\n ]\n\n# capture = cv2.VideoCapture(os.path.join(VIDEO_DIR, 'trailer1.mp4'))\n try:\n if not os.path.exists(VIDEO_SAVE_DIR):\n os.makedirs(VIDEO_SAVE_DIR)\n except OSError:\n print ('Error: Creating directory of data')\n\n # points to be done before final coding\n \"\"\"\n path_for_image_dir\n list for the image array\n resolve for naming convention for location basis\n passing image in model\n \"\"\"\n\n # path for the data files\n data_path = '/content/Mask_RCNN/S3_Images/images/'\n onlyfiles = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f))]\n\n # empty list for the training data\n frames = []\n frame_count = 0\n batch_count = 1\n\n # enumerate the iteration with number of files\n for j, files in enumerate(onlyfiles):\n image_path = data_path + onlyfiles[j]\n# print(\"image Path {}\".format(image_path))\n# print(\"Only Files {}\".format(onlyfiles[j]))\n# print('j is {}'.format(j))\n# print('files is {}'.format(files))\n try:\n images = cv2.imread(image_path).astype(np.uint8)\n# print(\"images {}\".format(images))\n frames.append(np.asarray(images, dtype=np.uint8))\n # frames.append(images)\n frame_count += 1\n print('frame_count :{0}'.format(frame_count))\n if len(frames) == batch_size:\n results = model.detect(frames, verbose=0)\n print('Predicted')\n for i, item in enumerate(zip(frames, results)):\n# print('i is {}'.format(i))\n# print('item is {}'.format(item))\n frame = item[0]\n r = item[1]\n frame = display_instances(\n frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']\n )\n name = '{}'.format(files)\n name = os.path.join(VIDEO_SAVE_DIR, name)\n# name = '{0}.jpg'.format(frame_count + i - batch_size)\n# name = os.path.join(VIDEO_SAVE_DIR, name)\n# cv2.imwrite(name, frame)\n# print(name)\n print('writing to file:{0}'.format(name))\n# print(name)\n save_image(images, name, r['rois'], r['masks'], r['class_ids'],\n r['scores'], class_names, save_dir=VIDEO_SAVE_DIR, mode=0)\n frames = []\n print('clear')\n # clear the frames here\n\n except(AttributeError) as e:\n print('Bad Image {}'.format(image_path))\n\n print(\"Success, check the folder\")\n\n\n\"\"\"\n ## Code for the video section\n frames = []\n frame_count = 0\n # these 2 lines can be removed if you dont have a 1080p camera.\n 
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n\n\n while True:\n ret, frame = capture.read()\n # Bail out when the video file ends\n if not ret:\n break\n\n # Save each frame of the video to a list\n frame_count += 1\n frames.append(frame)\n print('frame_count :{0}'.format(frame_count))\n if len(frames) == batch_size:\n results = model.detect(frames, verbose=0)\n print('Predicted')\n for i, item in enumerate(zip(frames, results)):\n frame = item[0]\n r = item[1]\n frame = display_instances(\n frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']\n )\n# name = '{0}.jpg'.format(frame_count + i - batch_size)\n# name = os.path.join(VIDEO_SAVE_DIR, name)\n# cv2.imwrite(name, frame)\n# print('writing to file:{0}'.format(name))\n ## add visualise files\n# visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n# class_names, r['scores'])\n save_image(image, name, r['rois'], r['masks'], r['class_ids'],\n r['scores'],class_names, save_dir=VIDEO_SAVE_DIR, mode=0)\n# print(r['class_ids'], r['scores'])\n\n # Clear the frames array to start the next batch\n frames = []\n\n capture.release()\n\"\"\"", "\nConfigurations:\nBACKBONE resnet101\nBACKBONE_STRIDES [4, 8, 16, 32, 64]\nBATCH_SIZE 3\nBBOX_STD_DEV [0.1 0.1 0.2 0.2]\nCOMPUTE_BACKBONE_SHAPE None\nDETECTION_MAX_INSTANCES 100\nDETECTION_MIN_CONFIDENCE 0.7\nDETECTION_NMS_THRESHOLD 0.3\nFPN_CLASSIF_FC_LAYERS_SIZE 1024\nGPU_COUNT 1\nGRADIENT_CLIP_NORM 5.0\nIMAGES_PER_GPU 3\nIMAGE_CHANNEL_COUNT 3\nIMAGE_MAX_DIM 1024\nIMAGE_META_SIZE 14\nIMAGE_MIN_DIM 800\nIMAGE_MIN_SCALE 0\nIMAGE_RESIZE_MODE square\nIMAGE_SHAPE [1024 1024 3]\nLEARNING_MOMENTUM 0.9\nLEARNING_RATE 0.001\nLOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}\nMASK_POOL_SIZE 14\nMASK_SHAPE [28, 28]\nMAX_GT_INSTANCES 100\nMEAN_PIXEL [123.7 116.8 103.9]\nMINI_MASK_SHAPE (56, 56)\nNAME damage\nNUM_CLASSES 2\nPOOL_SIZE 7\nPOST_NMS_ROIS_INFERENCE 1000\nPOST_NMS_ROIS_TRAINING 2000\nPRE_NMS_LIMIT 6000\nROI_POSITIVE_RATIO 0.33\nRPN_ANCHOR_RATIOS [0.5, 1, 2]\nRPN_ANCHOR_SCALES (32, 64, 128, 256, 512)\nRPN_ANCHOR_STRIDE 1\nRPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2]\nRPN_NMS_THRESHOLD 0.7\nRPN_TRAIN_ANCHORS_PER_IMAGE 256\nSTEPS_PER_EPOCH 1000\nTOP_DOWN_PYRAMID_SIZE 256\nTRAIN_BN False\nTRAIN_ROIS_PER_IMAGE 200\nUSE_MINI_MASK True\nUSE_RPN_ROIS True\nVALIDATION_STEPS 50\nWEIGHT_DECAY 0.0001\n\n\nframe_count :1\nframe_count :2\nframe_count :3\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img00bd1-lat-28.5536884-long-77.2669158.jpeg to draw *** \n\nclear\nframe_count :4\nframe_count :5\nframe_count :6\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/\\frame3.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/\\frame3.jpg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to 
file:/content/Mask_RCNN/videos/save/\\frame3.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/\\frame3.jpg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/\\frame3.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 100835\nclass_id value is 1\ncounter for the class ID 2\narea is 4752\nclass_id value is 1\ncounter for the class ID 3\narea is 31860\n[1 1 1] [0.99894387 0.9983626 0.99591905]\n[100835, 4752, 31860]\nimage_name is /content/Mask_RCNN/videos/save/\\frame3.jpg\nimage name is \\frame3.jpg\navi96 [100835, 4752, 31860]\nNew Name file is [100835, 4752, 31860]-[0.99894387 0.9983626 0.99591905].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [100835, 4752, 31860]-[0.99894387 0.9983626 0.99591905].jpg\nclear\nframe_count :7\nframe_count :8\nframe_count :9\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 250\nclass_id value is 1\ncounter for the class ID 2\narea is 260\n[1 1] [0.9999243 0.8455281]\n[250, 260]\nimage_name is /content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg\nimage name is Muroran_20170920113720.jpg\navi96 [250, 260]\nNew Name file is [250, 260]-[0.9999243 0.8455281].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [250, 260]-[0.9999243 0.8455281].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/Muroran_20170920113720.jpg to draw *** \n\nclear\nframe_count :10\nframe_count :11\nframe_count :12\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 429\n[1] [0.9999603]\n[429]\nimage_name is /content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nimage name is GtooxLZodzIddzZT.jpg\navi96 [429]\nNew Name file is [429]-[0.9999603].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [429]-[0.9999603].jpg\nwriting to file:/content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 4935\n[1] [0.9999962]\n[4935]\nimage_name is /content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nimage name is GtooxLZodzIddzZT.jpg\navi96 [4935]\nNew Name file is [4935]-[0.9999962].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [4935]-[0.9999962].jpg\nwriting to file:/content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 480\nclass_id value is 1\ncounter for the class ID 2\narea is 416\n[1 1] [0.99973744 0.98802954]\n[480, 416]\nimage_name is /content/Mask_RCNN/videos/save/GtooxLZodzIddzZT.jpg\nimage name is GtooxLZodzIddzZT.jpg\navi96 [480, 416]\nNew Name file is [480, 416]-[0.99973744 0.98802954].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [480, 416]-[0.99973744 0.98802954].jpg\nclear\nframe_count :13\nframe_count :14\nframe_count :15\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/351.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/351.jpg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/351.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/351.jpg to draw *** \n\nwriting to 
file:/content/Mask_RCNN/videos/save/351.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 546\n[1] [0.9994134]\n[546]\nimage_name is /content/Mask_RCNN/videos/save/351.jpg\nimage name is 351.jpg\navi96 [546]\nNew Name file is [546]-[0.9994134].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [546]-[0.9994134].jpg\nclear\nframe_count :16\nframe_count :17\nframe_count :18\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg\nclass_id value is 1\ncounter for the class ID 1\narea is 816\nclass_id value is 1\ncounter for the class ID 2\narea is 176\n[1 1] [0.99922216 0.99323297]\n[816, 176]\nimage_name is /content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg\nimage name is img0d7ac-lat-28.5536884-long-77.2669158.jpeg\navi96 [816, 176]\nNew Name file is [816, 176]-[0.99922216 0.99323297].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [816, 176]-[0.99922216 0.99323297].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img0d7ac-lat-28.5536884-long-77.2669158.jpeg to draw *** \n\nclear\nframe_count :19\nframe_count :20\nframe_count :21\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 250\nclass_id value is 1\ncounter for the class ID 2\narea is 720\n[1 1] [0.98426384 0.9620987 ]\n[250, 720]\nimage_name is /content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg\nimage name is potholes-3-greenfield-rd-psweeting.jpg\navi96 [250, 720]\nNew Name file is [250, 720]-[0.98426384 0.9620987 ].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [250, 720]-[0.98426384 0.9620987 ].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 440\n[1] [0.9712983]\n[440]\nimage_name is /content/Mask_RCNN/videos/save/potholes-3-greenfield-rd-psweeting.jpg\nimage name is potholes-3-greenfield-rd-psweeting.jpg\navi96 [440]\nNew Name file is [440]-[0.9712983].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [440]-[0.9712983].jpg\nclear\nframe_count :22\nframe_count :23\nframe_count :24\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 1560\n[1] [0.99997425]\n[1560]\nimage_name is /content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\nimage name is 3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\navi96 [1560]\nNew Name file is [1560]-[0.99997425].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [1560]-[0.99997425].jpg\nwriting to file:/content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 384\nclass_id value is 1\ncounter for the class ID 2\narea is 1449\n[1 1] 
[0.997591 0.8543571]\n[384, 1449]\nimage_name is /content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\nimage name is 3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\navi96 [384, 1449]\nNew Name file is [384, 1449]-[0.997591 0.8543571].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [384, 1449]-[0.997591 0.8543571].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/3cdffa93-a892-4247-82ff-0aab233dbfa1.mov-0001.jpg to draw *** \n\nclear\nframe_count :25\nframe_count :26\nframe_count :27\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 2775\n[1] [0.9999815]\n[2775]\nimage_name is /content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\nimage name is 3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\navi96 [2775]\nNew Name file is [2775]-[0.9999815].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [2775]-[0.9999815].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/3dade430-d240-447e-a942-5bed90d8d11e.mov-0001.jpg to draw *** \n\nclear\nframe_count :28\nframe_count :29\nframe_count :30\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg\n\n*** No instances in image /content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg\nclass_id value is 1\ncounter for the class ID 1\narea is 286\nclass_id value is 1\ncounter for the class ID 2\narea is 330\nclass_id value is 1\ncounter for the class ID 3\narea is 416\nclass_id value is 1\ncounter for the class ID 4\narea is 418\n[1 1 1 1] [0.98072284 0.92764556 0.87470007 0.873562 ]\n[286, 330, 416, 418]\nimage_name is /content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg\nimage name is img01aa4-lat-28.5557378-long-77.2658383.jpeg\navi96 [286, 330, 416, 418]\nNew Name file is [286, 330, 416, 418]-[0.98072284 0.92764556 0.87470007 0.873562 ].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [286, 330, 416, 418]-[0.98072284 0.92764556 0.87470007 0.873562 ].jpg\nwriting to file:/content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg\nclass_id value is 1\ncounter for the class ID 1\narea is 290\n[1] [0.9997447]\n[290]\nimage_name is /content/Mask_RCNN/videos/save/img01aa4-lat-28.5557378-long-77.2658383.jpeg\nimage name is img01aa4-lat-28.5557378-long-77.2658383.jpeg\navi96 [290]\nNew Name file is [290]-[0.9997447].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [290]-[0.9997447].jpg\nclear\nframe_count :31\nframe_count :32\nBad Image /content/Mask_RCNN/S3_Images/images/.DS_Store\nframe_count :33\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 85\nclass_id value is 1\ncounter for 
the class ID 2\narea is 196\nclass_id value is 1\ncounter for the class ID 3\narea is 75\n[1 1 1] [0.9978288 0.9905183 0.98221266]\n[85, 196, 75]\nimage_name is /content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\nimage name is 3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\navi96 [85, 196, 75]\nNew Name file is [85, 196, 75]-[0.9978288 0.9905183 0.98221266].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [85, 196, 75]-[0.9978288 0.9905183 0.98221266].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/3cb4869a-247a-452e-a9ed-051db987dc27.mov-0001.jpg to draw *** \n\nclear\nframe_count :34\nframe_count :35\nframe_count :36\nPredicted\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/4173.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/4173.jpg to draw *** \n\nwriting to file:/content/Mask_RCNN/videos/save/4173.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 3024\n[1] [0.999912]\n[3024]\nimage_name is /content/Mask_RCNN/videos/save/4173.jpg\nimage name is 4173.jpg\navi96 [3024]\nNew Name file is [3024]-[0.999912].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [3024]-[0.999912].jpg\nwriting to file:/content/Mask_RCNN/videos/save/4173.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 13719\nclass_id value is 1\ncounter for the class ID 2\narea is 10044\nclass_id value is 1\ncounter for the class ID 3\narea is 17577\nclass_id value is 1\ncounter for the class ID 4\narea is 9504\nclass_id value is 1\ncounter for the class ID 5\narea is 3774\n[1 1 1 1 1] [0.99999344 0.9999933 0.99996066 0.99993455 0.99992 ]\n[13719, 10044, 17577, 9504, 3774]\nimage_name is /content/Mask_RCNN/videos/save/4173.jpg\nimage name is 4173.jpg\navi96 [13719, 10044, 17577, 9504, 3774]\nNew Name file is [13719, 10044, 17577, 9504, 3774]-[0.99999344 0.9999933 0.99996066 0.99993455 0.99992 ].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [13719, 10044, 17577, 9504, 3774]-[0.99999344 0.9999933 0.99996066 0.99993455 0.99992 ].jpg\nclear\nframe_count :37\nframe_count :38\nframe_count :39\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 264\n[1] [0.8035971]\n[264]\nimage_name is /content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\nimage name is 1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\navi96 [264]\nNew Name file is [264]-[0.8035971].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [264]-[0.8035971].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg to draw *** \n\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/1f2ea472-beb4-4ee9-b3fd-4de6e26c6d62.mov-0001.jpg to draw *** \n\nclear\nframe_count :40\nframe_count :41\nframe_count 
:42\nPredicted\nwriting to file:/content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 319\n[1] [0.9997627]\n[319]\nimage_name is /content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\nimage name is 03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\navi96 [319]\nNew Name file is [319]-[0.9997627].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [319]-[0.9997627].jpg\nwriting to file:/content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\nclass_id value is 1\ncounter for the class ID 1\narea is 4556\n[1] [0.99999726]\n[4556]\nimage_name is /content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\nimage name is 03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\navi96 [4556]\nNew Name file is [4556]-[0.99999726].jpg\nsave_dir is /content/Mask_RCNN/videos/save\nfile Saved [4556]-[0.99999726].jpg\nNO INSTANCES TO DISPLAY\nwriting to file:/content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg\n\n*** No instances in image /content/Mask_RCNN/videos/save/03d51b5e-ba51-4b83-a40b-e7294e8e976a.mov-0001.jpg to draw *** \n\nclear\nSuccess, check the folder\n" ], [ "!ls /content/Mask_RCNN/videos/save/", "'[100835, 4752, 31860]-[0.99894387 0.9983626 0.99591905].jpg'\n'[13719, 10044, 17577, 9504, 3774]-[0.99999344 0.9999933 0.99996066 0.99993455 0.99992 ].jpg'\n'[1560]-[0.99997425].jpg'\n'[250, 260]-[0.9999243 0.8455281].jpg'\n'[250, 720]-[0.98426384 0.9620987 ].jpg'\n'[264]-[0.8035971].jpg'\n'[2775]-[0.9999815].jpg'\n'[286, 330, 416, 418]-[0.98072284 0.92764556 0.87470007 0.873562 ].jpg'\n'[290]-[0.9997447].jpg'\n'[3024]-[0.999912].jpg'\n'[319]-[0.9997627].jpg'\n'[384, 1449]-[0.997591 0.8543571].jpg'\n'[429]-[0.9999603].jpg'\n'[440]-[0.9712983].jpg'\n'[4556]-[0.99999726].jpg'\n'[480, 416]-[0.99973744 0.98802954].jpg'\n'[4935]-[0.9999962].jpg'\n'[546]-[0.9994134].jpg'\n'[816, 176]-[0.99922216 0.99323297].jpg'\n'[85, 196, 75]-[0.9978288 0.9905183 0.98221266].jpg'\n" ], [ "!zip -r save.zip /content/Mask_RCNN/videos/save/", " adding: content/Mask_RCNN/videos/save/ (stored 0%)\n adding: content/Mask_RCNN/videos/save/[4556]-[0.99999726].jpg (deflated 0%)\n adding: content/Mask_RCNN/videos/save/[100835, 4752, 31860]-[0.99894387 0.9983626 0.99591905].jpg (deflated 0%)\n adding: content/Mask_RCNN/videos/save/[250, 260]-[0.9999243 0.8455281].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[3024]-[0.999912].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[384, 1449]-[0.997591 0.8543571].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[264]-[0.8035971].jpg (deflated 0%)\n adding: content/Mask_RCNN/videos/save/[1560]-[0.99997425].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[250, 720]-[0.98426384 0.9620987 ].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[816, 176]-[0.99922216 0.99323297].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[4935]-[0.9999962].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[319]-[0.9997627].jpg (deflated 0%)\n adding: content/Mask_RCNN/videos/save/[429]-[0.9999603].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[286, 330, 416, 418]-[0.98072284 0.92764556 0.87470007 0.873562 ].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[2775]-[0.9999815].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[440]-[0.9712983].jpg (deflated 2%)\n adding: content/Mask_RCNN/videos/save/[480, 416]-[0.99973744 
0.98802954].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[85, 196, 75]-[0.9978288 0.9905183 0.98221266].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[13719, 10044, 17577, 9504, 3774]-[0.99999344 0.9999933 0.99996066 0.99993455 0.99992 ].jpg (deflated 1%)\n adding: content/Mask_RCNN/videos/save/[546]-[0.9994134].jpg (deflated 2%)\n adding: content/Mask_RCNN/videos/save/[290]-[0.9997447].jpg (deflated 1%)\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0595876e372702a9946aadf6ea31a34dcf6953a
30,984
ipynb
Jupyter Notebook
Starter_Code/credit_risk_resampling.ipynb
AntoJKumar/Risky_Business
08d8be8682630a4a44aae57de648cb8b20f581ba
[ "ADSL" ]
null
null
null
Starter_Code/credit_risk_resampling.ipynb
AntoJKumar/Risky_Business
08d8be8682630a4a44aae57de648cb8b20f581ba
[ "ADSL" ]
null
null
null
Starter_Code/credit_risk_resampling.ipynb
AntoJKumar/Risky_Business
08d8be8682630a4a44aae57de648cb8b20f581ba
[ "ADSL" ]
null
null
null
27.664286
294
0.478892
[ [ [ "# Credit Risk Resampling Techniques", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom collections import Counter", "_____no_output_____" ] ], [ [ "# Read the CSV into DataFrame", "_____no_output_____" ] ], [ [ "# Load the data\nfile_path = Path('Resources/lending_data.csv')\ndf = pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ] ], [ [ "# Split the Data into Training and Testing", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\n\nle.fit(df[\"homeowner\"])\ndf[\"homeowner\"] = le.transform(df[\"homeowner\"])", "_____no_output_____" ], [ "# Create our features\nX = X = df.copy()\nX.drop(\"loan_status\", axis=1, inplace=True)\n\n# Create our target\ny = y = df['loan_status']", "_____no_output_____" ], [ "X.describe()", "_____no_output_____" ], [ "# Check the balance of our target values\ny.value_counts()", "_____no_output_____" ], [ "# Create X_train, X_test, y_train, y_test\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)", "_____no_output_____" ] ], [ [ "## Data Pre-Processing\n\nScale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).", "_____no_output_____" ] ], [ [ "# Create the StandardScaler instance\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()", "_____no_output_____" ], [ "# Fit the Standard Scaler with the training data\n# When fitting scaling functions, only train on the training dataset\nX_scaler = scaler.fit(X_train)", "_____no_output_____" ], [ "# Scale the training and testing data\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "# Simple Logistic Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression(solver='lbfgs', random_state=1)\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\nfrom sklearn.metrics import balanced_accuracy_score\ny_pred = model.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred)", "_____no_output_____" ], [ "# Display the confusion matrix\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nfrom imblearn.metrics import classification_report_imbalanced\nprint(classification_report_imbalanced(y_test, y_pred))", " pre rec spe f1 geo iba sup\n\n high_risk 0.85 0.91 0.99 0.88 0.95 0.90 619\n low_risk 1.00 0.99 0.91 1.00 0.95 0.91 18765\n\navg / total 0.99 0.99 0.91 0.99 0.95 0.91 19384\n\n" ] ], [ [ "# Oversampling\n\nIn this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Print the confusion matrix from sklearn.metrics.\n5. 
Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ], [ "### Naive Random Oversampling", "_____no_output_____" ] ], [ [ "# Resample the training data with the RandomOversampler\nfrom imblearn.over_sampling import RandomOverSampler\n\nros = RandomOverSampler(random_state=1)\nX_resampled1, y_resampled1 = ros.fit_resample(X_train, y_train)\n\n# View the count of target classes with Counter\nCounter(y_resampled1)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel1 = LogisticRegression(solver='lbfgs', random_state=1)\nmodel1.fit(X_resampled1, y_resampled1)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred1 = model1.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred1)", "_____no_output_____" ], [ "# Display the confusion matrix\nconfusion_matrix(y_test, y_pred1)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred1))", " pre rec spe f1 geo iba sup\n\n high_risk 0.84 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "### SMOTE Oversampling", "_____no_output_____" ] ], [ [ "# Resample the training data with SMOTE\nfrom imblearn.over_sampling import SMOTE\n\nX_resampled2, y_resampled2 = SMOTE(random_state=1, sampling_strategy=1.0).fit_resample(X_train, y_train)\n\n# View the count of target classes with Counter\nCounter(y_resampled2)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel2 = LogisticRegression(solver='lbfgs', random_state=1)\nmodel2.fit(X_resampled2, y_resampled2)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred2 = model2.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred2)", "_____no_output_____" ], [ "# Display the confusion matrix\nconfusion_matrix(y_test, y_pred2)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred2))", " pre rec spe f1 geo iba sup\n\n high_risk 0.84 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "# Undersampling\n\nIn this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Display the confusion matrix from sklearn.metrics.\n5. 
Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ] ], [ [ "# Resample the data using the ClusterCentroids resampler\nfrom imblearn.under_sampling import ClusterCentroids\n\ncc = ClusterCentroids(random_state=1)\nX_resampled3, y_resampled3 = cc.fit_resample(X_train, y_train)\n\n# View the count of target classes with Counter\nCounter(y_resampled3)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel3 = LogisticRegression(solver='lbfgs', random_state=1)\nmodel3.fit(X_resampled3, y_resampled3)", "_____no_output_____" ], [ "# Calculate the balanced accuracy score\ny_pred3 = model3.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred3)", "_____no_output_____" ], [ "# Display the confusion matrix\nconfusion_matrix(y_test, y_pred3)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred3, digits = 4))", " pre rec spe f1 geo iba sup\n\n high_risk 0.8440 0.9790 0.9940 0.9065 0.9865 0.9717 619\n low_risk 0.9993 0.9940 0.9790 0.9967 0.9865 0.9746 18765\n\navg / total 0.9943 0.9936 0.9795 0.9938 0.9865 0.9745 19384\n\n" ] ], [ [ "# Combination (Over and Under) Sampling\n\nIn this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Display the confusion matrix from sklearn.metrics.\n5. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ] ], [ [ "# Resample the training data with SMOTEENN\nfrom imblearn.combine import SMOTEENN\n\nsm = SMOTEENN(random_state=1)\nX_resampled4, y_resampled4 = sm.fit_resample(X_train, y_train)\n\n# View the count of target classes with Counter\nCounter(y_resampled4)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel4 = LogisticRegression(solver='lbfgs', random_state=1)\nmodel4.fit(X_resampled4, y_resampled4)", "_____no_output_____" ], [ "# Calculate the balanced accuracy score\ny_pred4 = model4.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred4)", "_____no_output_____" ], [ "# Display the confusion matrix\nconfusion_matrix(y_test, y_pred4)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred4))", " pre rec spe f1 geo iba sup\n\n high_risk 0.83 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "# Final Questions\n\n1. Which model had the best balanced accuracy score?\n\n Oversampling models has best balanced accuracy score at 99%\n\n2. Which model had the best recall score?\n\n Oversampling models have best recall scores around 99%\n\n3. 
Which model had the best geometric mean score?\n\n   The oversampling models have the best geometric mean score, at around 99%.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d05981bb30393bafb2a0a0031b3a3a655934ab84
618,437
ipynb
Jupyter Notebook
trabalho2/Caches.ipynb
laurocruz/MC733
2b29ab120d7c9a3bb679a5c8f746bfcebbf8e924
[ "MIT" ]
null
null
null
trabalho2/Caches.ipynb
laurocruz/MC733
2b29ab120d7c9a3bb679a5c8f746bfcebbf8e924
[ "MIT" ]
null
null
null
trabalho2/Caches.ipynb
laurocruz/MC733
2b29ab120d7c9a3bb679a5c8f746bfcebbf8e924
[ "MIT" ]
null
null
null
896.285507
22,302
0.941764
[ [ [ "# Gráficos de desempenho das Caches", "_____no_output_____" ], [ "### Import libs", "_____no_output_____" ] ], [ [ "%matplotlib inline\n##Bibliotecas importadas\n# Biblioteca usada para abrir arquivos CSV\nimport csv\n# Bibilioteca para fazer leitura de datas\nfrom datetime import datetime, timedelta\n# Fazer o ajuste de datas no gráfico\nimport matplotlib.dates as mdate\n# Biblioteca mateḿática\nimport numpy as np\n# Bibloteca para traçar gráficos\nimport matplotlib.pyplot as plt\n#Biblioteca para mudar tamanho o gráfico apresentado\nimport matplotlib.cm as cm\nimport operator as op\nimport os\nimport math", "_____no_output_____" ] ], [ [ "## Generate miss % graphs", "_____no_output_____" ] ], [ [ "for file in os.listdir('cache_csv/percentage'):\n filepath = 'cache_csv/percentage/'+file\n dados = list(csv.reader(open(filepath,'r')))\n \n alg = file.split('.')[0]\n \n mr1 = list()\n mr2 = list()\n mw1 = list()\n mw2 = list()\n mrw1 = list()\n mrw2 = list()\n mi1 = list()\n mi2 = list()\n \n for dado in dados:\n mr1.append(float(dado[0]))\n mw1.append(float(dado[1]))\n mrw1.append(float(dado[2]))\n \n mr2.append(float(dado[3]))\n mw2.append(float(dado[4]))\n mrw2.append(float(dado[5]))\n \n mi1.append(float(dado[6]))\n mi2.append(float(dado[7]))\n \n # Configurações de cache\n x = np.arange(1, 9)\n\n ##### READ MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mr1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mr2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n \n # Bordas\n #plt.ylim([0,100])\n #plt.xlim([1,9])\n\n # Legendas\n plt.title('L1 x L2 read miss rate (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Read miss rate (%)')\n \n plt.savefig('img/Cache/percentage/cache_' + alg + '_r.png', dpi=300)\n plt.show()\n plt.close()\n \n ##### WRITE MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mw1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mw2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n\n # Legendas\n plt.title('L1 x L2 write miss rate (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Write miss rate (%)')\n \n plt.savefig('img/Cache/percentage/cache_' + alg + '_w.png', dpi=300)\n plt.show()\n plt.close()\n \n \n ##### TOTAL MISSES (DATA) #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mrw1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mrw2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n \n # Bordas\n #plt.ylim([0,100])\n #plt.xlim([1,9])\n\n # Legendas\n plt.title('L1 x L2 total data miss rate (' + alg + ')')\n plt.xlabel('Cache Configurations')\n 
plt.ylabel('Total data miss rate (%)')\n \n plt.savefig('img/Cache/percentage/cache_' + alg + '_rw.png', dpi=300)\n plt.show()\n plt.close()\n \n \n ##### INSTRUCTION MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mi1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mi2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n\n # Legendas\n plt.title('L1 x L2 instruction miss rate (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Instruction miss rate (%)')\n \n plt.savefig('img/Cache/percentage/cache_' + alg + '_i.png', dpi=300)\n plt.show()\n plt.close()", "_____no_output_____" ] ], [ [ "## Generate graphs of miss numbers", "_____no_output_____" ] ], [ [ "for file in os.listdir('cache_csv/num'):\n filepath = 'cache_csv/num/'+file\n dados = list(csv.reader(open(filepath,'r')))\n \n alg = file.split('.')[0]\n \n mr1 = list()\n mr2 = list()\n mw1 = list()\n mw2 = list()\n mrw1 = list()\n mrw2 = list()\n mi1 = list()\n mi2 = list()\n \n for dado in dados:\n mr1.append(float(dado[0]))\n mw1.append(float(dado[1]))\n mrw1.append(int(dado[2]))\n \n mr2.append(float(dado[3]))\n mw2.append(float(dado[4]))\n mrw2.append(float(dado[5]))\n \n mi1.append(float(dado[6]))\n mi2.append(float(dado[7]))\n \n # Configurações de cache\n x = np.arange(1, 9)\n\n ##### READ MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mr1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mr2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n \n # Bordas\n #plt.ylim([0,100])\n #plt.xlim([1,9])\n\n # Legendas\n plt.title('L1 x L2 read misses (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Read misses')\n \n plt.savefig('img/Cache/num/cache_' + alg + '_r.png', dpi=300)\n plt.show()\n plt.close()\n \n ##### WRITE MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mw1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mw2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n\n # Legendas\n plt.title('L1 x L2 write misses (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Write misses')\n \n plt.savefig('img/Cache/num/cache_' + alg + '_w.png', dpi=300)\n plt.show()\n plt.close()\n \n \n ##### TOTAL MISSES (DATA) #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mrw1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mrw2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n 
plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n \n # Bordas\n #plt.ylim([0,100])\n #plt.xlim([1,9])\n\n # Legendas\n plt.title('L1 x L2 total data misses (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Total data misses')\n \n plt.savefig('img/Cache/num/cache_' + alg + '_rw.png', dpi=300)\n plt.show()\n plt.close()\n \n \n ##### INSTRUCTION MISSES #####\n markerline1, stemlines1, baseline1 = plt.stem(x,mi1)\n markerline2, stemlines2, baseline2 = plt.stem(x,mi2)\n #plt.xticks(x, (1,2,3,4,5,6,7,8))\n \n # Define característica das linhas\n plt.setp(stemlines1, 'linestyle', 'none')\n plt.setp(markerline1, 'linestyle', '-', 'color', 'r')\n plt.setp(baseline1, 'linestyle', 'none')\n \n plt.setp(stemlines2, 'linestyle', 'none')\n plt.setp(markerline2, 'linestyle', '-', 'color', 'b')\n plt.setp(baseline2, 'linestyle', 'none')\n\n # Legendas\n plt.title('L1 x L2 instruction misses (' + alg + ')')\n plt.xlabel('Cache Configurations')\n plt.ylabel('Instruction misses')\n \n plt.savefig('img/Cache/num/cache_' + alg + '_i.png', dpi=300)\n plt.show()\n plt.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0598714b276139cb7c5bfef00fc7a98bf7e1bf8
85,915
ipynb
Jupyter Notebook
teaching_material/session_6/gruppe_8/3shape_final.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
20
2021-09-08T12:14:32.000Z
2021-11-19T11:57:39.000Z
teaching_material/session_6/gruppe_8/3shape_final.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
10
2021-08-12T14:41:18.000Z
2021-11-27T12:41:34.000Z
teaching_material/session_6/gruppe_8/3shape_final.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
20
2021-09-12T22:13:22.000Z
2021-12-07T19:27:05.000Z
33.378011
1,632
0.403922
[ [ [ "## Loading libraries and looking at given data", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "appendix_3=pd.read_excel(\"Appendix_3_august.xlsx\")\nappendix_3", "_____no_output_____" ], [ "print(appendix_3[\"Language\"].value_counts(),)\nprint(appendix_3[\"Country\"].value_counts())", "English 23\nSpanish 9\nArabic 8\nRussian 8\nGerman 5\nMandarin(Chinese) 2\nFrench 2\nGreek 2\nPortuguese 2\nIndonesian 1\nChineese 1\nItalian 1\nRomanian 1\nVietnamese 1\nCzech 1\nCroatian 1\nAmerican 1\nSlovak 1\nSlovenian 1\nPersian 1\nPolish 1\nNepali 1\nTurkish 1\nLithanian 1\nBulgarian 1\nKorean 1\nThai 1\nspanish 1\nHungarian 1\nJapanese 1\nSerbian 1\nGeorgian 1\nName: Language, dtype: int64 Malaysia 1\n Switzerland 1\n Italy 1\n Taiwan 1\n Indonesia 1\n ..\n Venezuela 1\n Iraq 1\n Croatia 1\n Bulgaria 1\n Germany 1\nName: Country, Length: 84, dtype: int64\n" ], [ "pd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\nprint(appendix_3['Country'].to_string(index=False))", " United States\n France\n Korea\n Italy\n Germany\n Australia\n China\n United Kingdom\n Spain\n Canada\n Netherlands\n Ireland\n Poland\n Denmark\n Switzerland\n United Arab Emirates\n Brazil\n Sweden\n Norway\n Singapore\n Taiwan\n Belgium\n Thailand\n Austria\n India\n Japan\n Lebanon\n Israel\n Hong Kong SAR\n Vietnam\n Slovakia\n New Zealand\n Greece\n Romania\n Turkey\n Mexico\n Czech Republic\n South Africa\n Finland\n Lithuania\n Russia\n Hungary\n Ukraine\n Pakistan\n Croatia\n Iceland\n Morocco\n Colombia\n Egypt\n Kuwait\n Bulgaria\n Iran\n Philippines\n Luxembourg\n Serbia\n Slovenia\n Tunisia\n Estonia\n Argentina\n Saudi Arabia\n Portugal\n Uruguay\n Costa Rica\n Chile\n Indonesia\n Jordan\n Cyprus\n Myanmar\n Paraguay\n Armenia\n Bolivia\n Moldova\n Azerbaijan\n Algeria\n Monaco\n Georgia\n Malaysia\n Venezuela\n Iraq\n Nepal\n Puerto Rico\n Liechtenstein\n Latvia\n" ] ], [ [ "## Removing useless data", "_____no_output_____" ] ], [ [ "appendix_3=appendix_3[appendix_3.Language!=\"Københavnsk\"]\nappendix_3=appendix_3.drop([\"Meaningless_ID\"], axis=1)\nappendix_3", "_____no_output_____" ], [ "appendix_3=appendix_3[appendix_3.Licenses!=0]\nappendix_3", "_____no_output_____" ] ], [ [ "## Making usefull languages", "_____no_output_____" ] ], [ [ "def language(var):\n \n \"\"\"Function that returns languages spoken by 3Shapes present support teams. 
\n If not spoken, return English\"\"\"\n \n if var.lower() in ['english','american']: #If english or \"american\"\n return 'English' #Return English\n if var.lower() in ['spanish']:\n return 'Spanish'\n if var.lower() in ['french']:\n return 'French'\n if var.lower() in ['german']:\n return 'German'\n if var.lower() in ['russian']:\n return 'Russian'\n if var.lower() in ['portuguese']:\n return 'Portuguese'\n if var.lower() in ['italian']:\n return 'Italian'\n if re.search('chin.+', var.lower()): # If lettercombination 'chin' appears:\n return 'Chinese' # Return 'Chinese'\n if var.lower() in ['japanese']:\n return 'Japanese'\n if var.lower() in ['korean']:\n return 'Korean'\n else: \n return 'English' #If not spoken, return English\n\nappendix_3['Support_language'] = appendix_3['Language'].apply(language)\nappendix_3['Support_language'].value_counts() ", "_____no_output_____" ], [ "appendix_3[\"Licenses_per_language\"]=appendix_3.groupby([\"Support_language\"])[\"Licenses\"].transform(\"sum\")", "_____no_output_____" ], [ "appendix_3['Country'] = appendix_3['Country'].str.strip() #Removing initial whitespace\nappendix_3.iloc[1,0]", "_____no_output_____" ] ], [ [ "## Making a column that \"groups\" countries into 3 regions/timezones of the world (Americas, Europe (incl. Middle East and Africa) and Asia)", "_____no_output_____" ] ], [ [ "def region(var):\n \n \"\"\"Function that returns region based on country\"\"\"\n \n if var in ['United States','Canada','Brazil','Mexico','Colombia','Argentina','Uruguay',\n 'Costa Rica','Chile','Paraguay','Bolivia','Venezuela','Puerto Rico']: \n return 'Americas' \n if var in ['France','Italy','Germany','United Kingdom','Spain','Netherlands','Ireland','Poland',\n 'Denmark','Switzerland','United Arab Emirates','Sweden','Norway','Belgium','Austria',\n 'Lebanon','Israel','Slovakia','Greece','Romania','Turkey','Czech Republic','South Africa',\n 'Finland','Lithuania','Russia','Hungary','Ukraine','Pakistan','Croatia','Iceland','Morocco',\n 'Egypt','Kuwait','Bulgaria','Iran','Luxembourg','Serbia','Slovenia','Tunisia','Estonia',\n 'Saudi Arabia','Portugal','Jordan','Cyprus','Armenia','Moldova','Azerbaijan','Algeria',\n 'Monaco','Georgia','Iraq','Liechtenstein','Latvia']:\n return 'Europe'\n if var in ['Korea','Australia','China','Singapore','Taiwan','Thailand','India','Japan',\n 'Hong Kong SAR','Vietnam','New Zealand','Philippines','Indonesia','Myanmar',\n 'Malaysia','Nepal']:\n return 'Asia'\n else:\n return 'No'\n\nappendix_3['Region'] = appendix_3['Country'].apply(region)\nappendix_3['Region'].head(6)", "_____no_output_____" ], [ "appendix_3[\"Licenses_per_region\"]=appendix_3.groupby([\"Region\"])[\"Licenses\"].transform(\"sum\")\nappendix_3[[\"Licenses_per_region\",\"Region\"]].head(6)", "_____no_output_____" ] ], [ [ "## New DataFrame with our three regions/support centers", "_____no_output_____" ] ], [ [ "New_regions=appendix_3.groupby([\"Region\"])[\"Licenses\"].sum().sort_values(ascending=False).to_frame().reset_index()\nNew_regions", "_____no_output_____" ], [ "def employees_needed(var):\n \"\"\" Function that gives number of recuired employees based on licenses\"\"\"\n if var <300:\n return 3\n else:\n return np.ceil((var-300)/200+3)", "_____no_output_____" ], [ "New_regions[\"Employ_needed\"]=New_regions[\"Licenses\"].apply(employees_needed)\nNew_regions.head(3)", "_____no_output_____" ], [ "New_regions[\"Revenue\"]=New_regions[\"Licenses\"]*2000\nNew_regions.head(3)", "_____no_output_____" ] ], [ [ "## Loking at appendix 2 and cleaning useless data, 
and converting to int.", "_____no_output_____" ] ], [ [ "appendix_2=pd.read_excel(\"Appendix_2_august.xlsx\")\nappendix_2", "_____no_output_____" ], [ "appendix_2=appendix_2.drop([5])\nappendix_2", "_____no_output_____" ], [ "appendix_2['Total cost']=appendix_2['Total cost'].astype(int)\nappendix_2['Average FTE']=appendix_2['Average FTE'].astype(int)", "_____no_output_____" ], [ "print(appendix_2.dtypes)", "Support Center object\nTotal cost int64\nAverage FTE int64\ndtype: object\n" ] ], [ [ "## Getting the cost pr. worker pr. support center", "_____no_output_____" ] ], [ [ "appendix_2[\"Cost_per_FTE\"]=np.round(appendix_2[\"Total cost\"]/appendix_2[\"Average FTE\"])\nappendix_2", "_____no_output_____" ] ], [ [ "## Because of trouble with merge, the values are tranferred manually to the new DataFrame", "_____no_output_____" ] ], [ [ "def regional_center(var):\n \"\"\" Quick function that gives the location of support center\"\"\"\n if var in ['Europe']:\n return 'Ukraine'\n if var in ['Americas']:\n return 'USA'\n if var in ['Asia']:\n return 'China'\nNew_regions[\"Support Center\"]=New_regions[\"Region\"].apply(regional_center)\nNew_regions.head(3)", "_____no_output_____" ], [ "New_regions['Cost per FTE']=[17105,83333,250000]\nNew_regions", "_____no_output_____" ] ], [ [ "## Altering the order of the columns to a more intiutive layout", "_____no_output_____" ] ], [ [ "print(list(New_regions.columns.values)) # ", "['Region', 'Licenses', 'Employ_needed', 'Revenue', 'Support Center', 'Cost per FTE', 'Total cost']\n" ], [ "New_regions=New_regions[['Support Center','Region', 'Licenses','Revenue','Employ_needed','Cost per FTE','Total cost']]\nNew_regions", "_____no_output_____" ] ], [ [ "## Calculation cost and balance values", "_____no_output_____" ] ], [ [ "New_regions['Total cost']=New_regions['Employ_needed']*New_regions['Cost per FTE']\nNew_regions", "_____no_output_____" ], [ "New_regions=New_regions.assign(Balance=New_regions['Revenue'] - New_regions['Total cost'])\nNew_regions", "_____no_output_____" ] ], [ [ "## Making a new DataFrame for the whole project", "_____no_output_____" ] ], [ [ "Whole_project=pd.DataFrame()", "_____no_output_____" ], [ "Whole_project['Licenses']=[New_regions['Licenses'].sum(axis=0)]\nWhole_project['Revenue']=[New_regions['Revenue'].sum(axis=0)]\nWhole_project['Employ_needed']=[New_regions['Employ_needed'].sum(axis=0)]\nWhole_project['Total cost']=[New_regions['Total cost'].sum(axis=0)]\nWhole_project['Balance']=[New_regions['Balance'].sum(axis=0)]\nWhole_project", "_____no_output_____" ], [ "Whole_project['Balance before']=(appendix_3['Licenses'].sum(axis=0)*2000*0.7)-appendix_2['Total cost'].sum(axis=0)\nWhole_project['Gain']=Whole_project['Balance']-Whole_project['Balance before']\nWhole_project", "_____no_output_____" ], [ "Whole_project['Balance + savings']=Whole_project['Balance']+(appendix_2.iloc[0]['Total cost']+appendix_2.iloc[3]['Total cost'])\nWhole_project['Gain + savings']=Whole_project['Balance + savings']-Whole_project['Balance before']\nWhole_project", "_____no_output_____" ] ], [ [ "## Looking at the 3-year forecast with different adoption rates\nFirst off is 10% adoption rate, then 50%, and finally 100%", "_____no_output_____" ] ], [ [ "def adoption(df_out_name,df_in_name,adoption_rate):\n \n \"\"\" A function that takes an adoption rate as input, and calculates usefull parameters \n (licenses, revenue, employees needed, cost and balance) after 3 years. 
\n An annual growth rate of 10% is given \"\"\"\n \n df_in_name[f'{adoption_rate} adoption, licenses']=round(df_in_name['Licenses']*(1.1**3)*adoption_rate)\n \n df_in_name[f'{adoption_rate} adoption, revenue']=df_in_name[f'{adoption_rate} adoption, licenses']*2000\n \n df_in_name[f'{adoption_rate} adoption, employ_needed']=np.ceil((df_in_name[f'{adoption_rate} adoption, licenses']-300)/200+3)\n \n df_in_name[f'{adoption_rate} adoption, total cost']=round(df_in_name[f'{adoption_rate} adoption, employ_needed']*((New_regions.iloc[0,5]*New_regions.iloc[0,4])+(New_regions.iloc[1,5]*New_regions.iloc[1,4])+(New_regions.iloc[2,5]*New_regions.iloc[2,4]))/New_regions['Employ_needed'].sum())\n \n df_in_name[f'{adoption_rate} adoption, balance']=df_in_name[f'{adoption_rate} adoption, revenue']-df_in_name[f'{adoption_rate} adoption, total cost']\n \n df_out_name=df_in_name\n return df_out_name\nadoption('Whole_project_10',Whole_project,0.1)\n", "_____no_output_____" ], [ "adoption('Whole_project_50',Whole_project,0.5)", "_____no_output_____" ], [ "adoption('Whole_project_50',Whole_project,1)", "_____no_output_____" ], [ "with pd.ExcelWriter('samlet.xlsx') as writer: \n appendix_3.to_excel(writer, sheet_name='Lande,sprog og licenser')\n appendix_2.to_excel(writer, sheet_name='Supportcenter og omkostninger')\n license_country.to_excel(writer, sheet_name='Licenser pr. supportsprog')", "_____no_output_____" ], [ "with pd.ExcelWriter('samlet_2.xlsx') as writer:\n New_regions.to_excel(writer, sheet_name='De tre supportcentre')\n Whole_project.to_excel(writer, sheet_name='Hele projektet')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0598e396cf1adead8cb1a35eab1812567a61d3b
17,196
ipynb
Jupyter Notebook
Modulo4/Ejercicios/Problema1.ipynb
francisrichard/CursoPythonD
9cb5c0c55cbed1fb3fc9e813253703c421f8e07f
[ "Apache-2.0" ]
null
null
null
Modulo4/Ejercicios/Problema1.ipynb
francisrichard/CursoPythonD
9cb5c0c55cbed1fb3fc9e813253703c421f8e07f
[ "Apache-2.0" ]
null
null
null
Modulo4/Ejercicios/Problema1.ipynb
francisrichard/CursoPythonD
9cb5c0c55cbed1fb3fc9e813253703c421f8e07f
[ "Apache-2.0" ]
null
null
null
51.331343
730
0.630379
[ [ [ "# ADN", "_____no_output_____" ], [ "Implemente un programa que identifique a una persona en función de su ADN, según se indica a continuación.", "_____no_output_____" ], [ "<code>$ python dna.py databases/large.csv sequences/5.txt\nLavender</code>", "_____no_output_____" ], [ "## Empezando", "_____no_output_____" ], [ "- Dentro de la carpeta data/adn se encuentra la información necesaria para resolver este ejercicio la cual incluye un archivo de base de datos y archivos txt con las cadenas adn", "_____no_output_____" ], [ "## Antecedentes", "_____no_output_____" ], [ "El ADN, el portador de información genética en los seres vivos, se ha utilizado en la justicia penal durante décadas. Pero, ¿cómo funciona exactamente el perfil de ADN? Dada una secuencia de ADN, ¿cómo pueden los investigadores forenses identificar a quién pertenece?\n\nBueno, el ADN es en realidad solo una secuencia de moléculas llamadas nucleótidos, dispuestas en una forma particular (una doble hélice). Cada nucleótido de ADN contiene una de cuatro bases diferentes: adenina (A), citosina (C), guanina (G) o timina (T). Cada célula humana tiene miles de millones de estos nucleótidos ordenados en secuencia. Algunas porciones de esta secuencia (es decir, el genoma) son iguales, o al menos muy similares, en casi todos los seres humanos, pero otras porciones de la secuencia tienen una mayor diversidad genética y, por tanto, varían más en la población.\n\nUn lugar donde el ADN tiende a tener una alta diversidad genética es en las repeticiones cortas en tándem (STR). Un STR es una secuencia corta de bases de ADN que tiende a repetirse consecutivamente numerosas veces en lugares específicos dentro del ADN de una persona. El número de veces que se repite un STR en particular varía mucho entre los individuos. En las siguientes muestras de ADN, por ejemplo, Alice tiene el STR <code>AGAT</code> repetido cuatro veces en su ADN, mientras que Bob tiene el mismo STR repetido cinco veces.", "_____no_output_____" ], [ "<img src=\"./img/adn.PNG\">", "_____no_output_____" ], [ "El uso de varios STR, en lugar de solo uno, puede mejorar la precisión del perfil de ADN. Si la probabilidad de que dos personas tengan el mismo número de repeticiones para un solo STR es del 5%, y el analista observa 10 STR diferentes, entonces la probabilidad de que dos muestras de ADN coincidan puramente por casualidad es de aproximadamente 1 en 1 billón (asumiendo que todos los STR son independientes entre sí). Entonces, si dos muestras de ADN coinciden en el número de repeticiones para cada uno de los STR, el analista puede estar bastante seguro de que provienen de la misma persona. CODIS, la base de datos de ADN del FBI , utiliza 20 STR diferentes como parte de su proceso de elaboración de perfiles de ADN.\n\n¿Cómo sería una base de datos de ADN de este tipo? Bueno, en su forma más simple, podría imaginarse formateando una base de datos de ADN como un archivo CSV, donde cada fila corresponde a un individuo y cada columna corresponde a un STR particular.", "_____no_output_____" ], [ "<code>name,AGAT,AATG,TATC\nAlice,28,42,14\nBob,17,22,19\nCharlie,36,18,25</code>", "_____no_output_____" ], [ "Los datos del archivo anterior sugerirían que Alice tiene la secuencia <code>AGAT</code> repetida 28 veces consecutivamente en algún lugar de su ADN, la secuencia <code>AATG</code> repetida 42 veces y <code>TATC</code> repetida 14 veces. Bob, mientras tanto, tiene esos mismos tres STR repetidos 17, 22 y 19 veces, respectivamente. 
Y Charlie tiene esos mismos tres STR repetidos 36, 18 y 25 veces, respectivamente.\n\nEntonces, dada una secuencia de ADN, ¿cómo podría identificar a quién pertenece? Bueno, imagina que buscas en la secuencia de ADN la secuencia consecutiva más larga de <code>AGAT</code>s repetidos y descubres que la secuencia más larga tiene 17 repeticiones. Si luego encontrara que la secuencia más larga de <code>AATG</code> tiene 22 repeticiones y la secuencia más larga de <code>TATC</code> 19 repeticiones, eso proporcionaría una evidencia bastante buena de que el ADN era de Bob. Por supuesto, también es posible que una vez que tome los recuentos de cada uno de los STR, no coincida con nadie en su base de datos de ADN, en cuyo caso no tendrá ninguna coincidencia.\n\nEn la práctica, dado que los analistas saben en qué cromosoma y en qué lugar del ADN se encontrará un STR, pueden localizar su búsqueda en una sección limitada del ADN. Pero ignoraremos ese detalle para este problema.\n\nSu tarea es escribir un programa que tomará una secuencia de ADN y un archivo CSV que contiene recuentos de STR para una lista de individuos y luego generará a quién pertenece el ADN (lo más probable).", "_____no_output_____" ], [ "## Especificaciones", "_____no_output_____" ], [ "En un archivo llamado <code>dna.py</code>, implementar un programa que identifica a la que pertenece una secuencia de ADN.\n\n- El programa debe requerir como primer argumento de línea de comando el nombre de un archivo CSV que contiene los recuentos de STR para una lista de individuos y debe requerir como segundo argumento de línea de comando el nombre de un archivo de texto que contiene la secuencia de ADN para identificar.\n \n - Si su programa se ejecuta con el número incorrecto de argumentos en la línea de comandos, su programa debería imprimir un mensaje de error de su elección (con <code>print</code>). Si se proporciona el número correcto de argumentos, puede suponer que el primer argumento es de hecho el nombre de archivo de un archivo CSV válido y que el segundo argumento es el nombre de archivo de un archivo de texto válido.\n \n- Su programa debería abrir el archivo CSV y leer su contenido en la memoria.\n - Puede suponer que la primera fila del archivo CSV serán los nombres de las columnas. 
La primera columna será la palabra <code>name</code> y las columnas restantes serán las propias secuencias STR.\n\n- Su programa debería abrir la secuencia de ADN y leer su contenido en la memoria.\n- Para cada uno de los STR (de la primera línea del archivo CSV), su programa debe calcular la ejecución más larga de repeticiones consecutivas del STR en la secuencia de ADN para identificar.\n- Si los conteos de STR coinciden exactamente con cualquiera de las personas en el archivo CSV, su programa debe imprimir el nombre de la persona que coincide.\n - Puede suponer que los recuentos de STR no coincidirán con más de un individuo.\n - Si los recuentos de STR no coinciden exactamente con ninguno de los individuos en el archivo CSV, su programa debería imprimir <code>\"No match\"</code>.", "_____no_output_____" ], [ "## Uso", "_____no_output_____" ], [ "Su programa debería comportarse según los siguientes ejemplos.", "_____no_output_____" ], [ "<code>$ python dna.py databases/large.csv sequences/5.txt\nLavender</code>\n\n\n<code>$ python dna.py\nUsage: python dna.py data.csv sequence.txt </code>\n\n<code>$ python dna.py data.csv\nUsage: python dna.py data.csv sequence.txt</code>", "_____no_output_____" ], [ "## Sugerencia", "_____no_output_____" ], [ "- Puede encontrar <a href='https://docs.python.org/3/library/csv.html'><code>csv</code></a> útil el módulo de Python para leer archivos CSV en la memoria. Es posible que desee aprovechar <a href='https://docs.python.org/3/library/csv.html#csv.reader'><code>csv.reader</code></a> o <a href='https://docs.python.org/3/library/csv.html#csv.DictReader'><code>csv.DictReader</code></a>.\n\n- Las funciones <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#reading-and-writing-files'><code>open</code></a> y <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#methods-of-file-objects'><code>read</code></a> pueden resultar útiles para leer archivos de texto en la memoria.\n- Considere qué estructuras de datos podrían ser útiles para realizar un seguimiento de la información en su programa. 
A <code>list</code> o a <code>dict</code> pueden resultar útiles.", "_____no_output_____" ], [ "## Pruebas", "_____no_output_____" ], [ "python dna.py databases/small.csv sequences/1.txt.1.txtAsegúrese de probar su código para cada uno de los siguientes.\n\n- Ejecute su programa como <code>python dna.py databases/small.csv sequences/1.txt.</code> Su programa debería generar <code>Bob</code>.\n- Ejecute su programa como <code>python dna.py databases/small.csv sequences/2.txt.</code> Su programa debería generar <code>No</code> match.\n- Ejecute su programa como <code>python dna.py databases/small.csv sequences/3.txt.</code> Su programa debería generar <code>No</code> match.\n- Ejecute su programa como <code>python dna.py databases/small.csv sequences/4.txt.</code> Su programa debería generar <code>Alice</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/5.txt.</code> Su programa debería generar <code>Lavender</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/6.txt.</code> Su programa debería generar <code>Luna</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/7.txt.</code> Su programa debería generar <code>Ron</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/8.txt.</code> Su programa debería generar <code>Ginny</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/9.txt.</code> Su programa debería generar <code>Draco</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/10.txt.</code> Su programa debería generar <code>Albus</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/11.txt.</code> Su programa debería generar <code>Hermione</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/12.txt.</code> Su programa debería generar <code>Lily</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/13.txt.</code> Su programa debería generar <code>No</code> match.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/14.txt.</code> Su programa debería generar <code>Severus</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/15.txt.</code> Su programa debería generar <code>Sirius</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/16.txt.</code> Su programa debería generar <code>No</code> match.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/17.txt.</code> Su programa debería generar <code>Harry</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/18.txt.</code> Su programa debería generar <code>No</code> match.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/19.txt.</code> Su programa debería generar <code>Fred</code>.\n- Ejecute su programa como <code>python dna.py databases/large.csv sequences/20.txt.</code> Su programa debería generar <code>No</code> match.", "_____no_output_____" ] ], [ [ "# Importando librerias\nimport csv\nimport re\n\n#Declarando funciones\ndef conteomaxstr(patron, texto):\n maxrep = 0\n count = 0\n fin = 0\n \n while True:\n encontrado = re.search(patron, texto)\n if encontrado:\n inicio = encontrado.start()\n end = encontrado.end()\n if fin == 0:\n count = 1\n fin = end\n texto = texto.replace(patron, patron.lower(), 1) #hacer un replace del patron para que no vuelva a ser encontrado en la 
siguiente iteración\n else:\n if inicio == fin:\n texto = texto.replace(patron, patron.lower(), 1) #hacer un replace del patron para que no vuelva a ser encontrado en la siguiente iteración\n count = count + 1\n fin = end\n else:\n #Se reinicia el conteo para los patrones restantes\n fin = 0\n if count > maxrep:\n maxrep = count\n count = 0\n else:\n if count > maxrep:\n maxrep = count\n break\n \n return maxrep\n\ndef busqueda_dna():\n nombre = \"No match\"\n \n try:\n #Se pide el nombre del archivo de .csv\n csvname = input(\"Introduce el nombre del archivo CSV que contiene los recuentos de STR para la lista de individuos:\")\n #Se abre el archivo .csv y se guarda en una colección de datos similar a un diccionario gracias al método csv.DictReader\n with open('./data/dna/databases/' + csvname, newline='') as csvfile:\n try:\n #Se pide el nombre del archivo de .txt\n txtname = input(\"Introduce el nombre del archivo de texto que contiene la secuencia ADN a indentificar:\")\n #Se abre el archivo de texto que contiene el ADN del individuo y se guarda el texto en un string ADN\n with open('./data/dna/sequences/' + txtname, mode = 'r') as txtfile:\n ADN = txtfile.read()\n for fila in csv.DictReader(csvfile):\n cadena = ADN\n if nombre == \"No match\":\n for patron in fila:\n if patron != 'name':\n if int(fila[patron]) == int(conteomaxstr(patron, cadena)):\n nombre = fila['name']\n else:\n nombre = \"No match\"\n break\n else:\n break\n print(nombre)\n except:\n print(f\"El archivo {txtname} no existe\")\n except:\n print(f\"El archivo {csvname} no existe\")", "_____no_output_____" ], [ "busqueda_dna()", "Introduce el nombre del archivo CSV que contiene los recuentos de STR para la lista de individuos: small.csv\nIntroduce el nombre del archivo de texto que contiene la secuencia ADN a indentificar: 1.txt\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
d059c2184e6539a7236f6aed1cf457b5e1c7da06
229,118
ipynb
Jupyter Notebook
Machine learning Project.ipynb
NEHASHARMA1234/ML_Project
b4ea03a629695dd3fc9541a9bb7ccc922b46f95d
[ "MIT" ]
null
null
null
Machine learning Project.ipynb
NEHASHARMA1234/ML_Project
b4ea03a629695dd3fc9541a9bb7ccc922b46f95d
[ "MIT" ]
null
null
null
Machine learning Project.ipynb
NEHASHARMA1234/ML_Project
b4ea03a629695dd3fc9541a9bb7ccc922b46f95d
[ "MIT" ]
null
null
null
54.037264
20,980
0.637095
[ [ [ "import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline\nimport matplotlib \nmatplotlib.rcParams[\"figure.figsize\"] = (20,10)", "_____no_output_____" ], [ "df1 = pd.read_csv(\"Bengaluru_House_Data.csv\")\ndf1.head()", "_____no_output_____" ], [ "df1.shape", "_____no_output_____" ], [ "df1.columns", "_____no_output_____" ], [ "df1[\"area_type\"].unique()", "_____no_output_____" ], [ "df1[\"area_type\"].value_counts()", "_____no_output_____" ], [ "# Drop features that are not required to build our model", "_____no_output_____" ], [ "df2=df1.drop(['area_type','availability','society','balcony'] , axis = 'columns')\ndf2.shape\n", "_____no_output_____" ], [ "# Data Cleaning : Handle NA values", "_____no_output_____" ], [ "df2.isnull().sum()", "_____no_output_____" ], [ "df3=df2.dropna()\ndf3.head()", "_____no_output_____" ], [ "df3.isnull().sum()", "_____no_output_____" ], [ "df3.shape", "_____no_output_____" ], [ "# Feature Engineering", "_____no_output_____" ], [ "# Add new feature(integer) for bhk (Bedrooms Hall Kitchen)", "_____no_output_____" ], [ "df3['size'].unique()", "_____no_output_____" ], [ "df3['BHK']=df3['size'].apply(lambda x: int(x.split(' ')[0]))", "<ipython-input-40-6fca8bf23756>:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df3['BHK']=df3['size'].apply(lambda x: int(x.split(' ')[0]))\n" ], [ "df3.head()", "_____no_output_____" ], [ "df3[df3.BHK>20]", "_____no_output_____" ], [ "df3.total_sqft.unique()", "_____no_output_____" ], [ "def is_float(x):\n try:\n float(x)\n except:\n return False\n return True", "_____no_output_____" ], [ "df3[~df3['total_sqft'].apply(is_float)].head(10)", "_____no_output_____" ], [ "df3['total_sqft'].unique()", "_____no_output_____" ], [ "def convert_sqft_to_num(x):\n tokens=x.split(\"-\")\n if len(tokens)==2:\n return(float(tokens[0])+float(tokens[1]))/2\n try:\n return float(x)\n except:\n return None", "_____no_output_____" ], [ "convert_sqft_to_num('627')", "_____no_output_____" ], [ "convert_sqft_to_num('627-7643')", "_____no_output_____" ], [ "df4=df3.copy()", "_____no_output_____" ], [ "df4['total_sqft']=df4['total_sqft'].apply(convert_sqft_to_num)\ndf4.head()", "_____no_output_____" ], [ "df4.loc[30]", "_____no_output_____" ], [ "df5=df4.copy()\ndf5['price_per_sqft']=df5['price']*100000/df5['total_sqft']", "_____no_output_____" ], [ "df5.head()", "_____no_output_____" ], [ "len(df5.location.unique())", "_____no_output_____" ], [ "df=df5['price_per_sqft'].describe()\ndf.head()", "_____no_output_____" ], [ "df5.location=df5.location.apply(lambda x: x.strip())\ndf5.head()", "_____no_output_____" ], [ "df5", "_____no_output_____" ], [ "location_stats=df5['location'].value_counts(ascending=False)\nlocation_stats", "_____no_output_____" ], [ "location_stats.values.sum()", "_____no_output_____" ], [ "location_stats[location_stats<=10]", "_____no_output_____" ], [ "len(location_stats[location_stats<=10])", "_____no_output_____" ], [ "other_location=location_stats[location_stats<=10]\nother_location", "_____no_output_____" ], [ "df5['location']=df5['location'].apply(lambda x: \"other\" if x in other_location else x )\ndf5", "_____no_output_____" ], [ "len(df5[df5.location!=\"other\"])", "_____no_output_____" ], [ "len(df5.location.unique())", 
"_____no_output_____" ], [ "df5[df5.total_sqft/df5.BHK<300].head()", "_____no_output_____" ], [ "df5.shape\n", "_____no_output_____" ], [ "df6=df5[~(df5.total_sqft/df5.BHK<300)]\ndf6.shape", "_____no_output_____" ], [ "df6.price_per_sqft.describe()", "_____no_output_____" ], [ "def remove_pps_outlier(df):\n df_out=pd.DataFrame()\n for key,subdf in df.groupby('location'):\n m=np.mean(subdf.price_per_sqft)\n st=np.std(subdf.price_per_sqft)\n reduced_df=subdf[(subdf.price_per_sqft>(m-st)) & (subdf.price_per_sqft<=(m+st))]\n df_out=pd.concat([df_out,reduced_df],ignore_index=True)\n return df_out\ndf7=remove_pps_outlier(df6)\ndf7.shape", "_____no_output_____" ], [ "df7.columns", "_____no_output_____" ], [ "df7['price_per_sqft'].unique", "_____no_output_____" ], [ "df7.price_per_sqft.describe()", "_____no_output_____" ], [ "df7", "_____no_output_____" ], [ "def plot_scatter_chart(df,location):\n bhk2=df[(df.location==location) & (df.BHK==2)]\n bhk3=df[(df.location==location) & (df.BHK==3)]\n matplotlib.rcParams['figure.figsize']=(15,10)\n plt.scatter(bhk2.total_sqft,bhk2.price,marker='*',color='red',label='2 BHK',s=50)\n plt.scatter(bhk3.total_sqft,bhk3.price,marker='+',color='blue',label='3 bhk',s=50)\n plt.xlabel('Price(Lakh Indian Rupees)')\n plt.ylabel('Total sqft Area')\n plt.title(location)\n plt.legend()\nplot_scatter_chart(df7,'Rajaji Nagar')\n\n", "_____no_output_____" ], [ "def remove_bhk_outliers(df):\n exclude_indices = np.array([])\n for location, location_df in df.groupby('location'):\n bhk_stats = {}\n for BHK, bhk_df in location_df.groupby('BHK'):\n bhk_stats[BHK] = {\n 'mean': np.mean(bhk_df.price_per_sqft),\n 'std': np.std(bhk_df.price_per_sqft),\n 'count': bhk_df.shape[0]\n }\n for BHK, bhk_df in location_df.groupby('BHK'):\n stats = bhk_stats.get(BHK-1)\n if stats and stats['count']>5:\n exclude_indices = np.append(exclude_indices, bhk_df[bhk_df.price_per_sqft<(stats['mean'])].index.values)\n return df.drop(exclude_indices,axis='index')\ndf8 = remove_bhk_outliers(df7)\n# df8 = df7.copy()\ndf8.shape", "_____no_output_____" ], [ "plot_scatter_chart(df8,'Rajaji Nagar')", "_____no_output_____" ], [ "plot_scatter_chart(df7,'Hebbal')", "_____no_output_____" ], [ "plot_scatter_chart(df8,'Hebbal')", "_____no_output_____" ], [ "import matplotlib\nmatplotlib.rcParams['figure.figsize']=(20,10)\nplt.hist(df8.price_per_sqft,rwidth=0.8)\nplt.xlabel('price per sqaure feet')\nplt.ylabel('count')", "_____no_output_____" ], [ "df8.bath.unique()", "_____no_output_____" ], [ "df8[df8.bath>10]", "_____no_output_____" ], [ "plt.hist(df8.bath,rwidth=0.8)\nplt.xlabel('No. 
of bathrooms')\nplt.ylabel('Count')", "_____no_output_____" ], [ "df8[df8.bath>df8.BHK+2]", "_____no_output_____" ], [ "df9=df8[df8.bath<df8.BHK+2]\ndf9.shape", "_____no_output_____" ], [ "\ndf10 = df9.drop(['size','price_per_sqft'],axis='columns')\ndf10.head(3)", "_____no_output_____" ], [ "# Use One Hot Encoding for location", "_____no_output_____" ], [ "dummies = pd.get_dummies(df10.location)\ndummies.head(3)", "_____no_output_____" ], [ "df11=pd.concat([df10,dummies.drop('other',axis='columns')],axis='columns')\ndf11.head()", "_____no_output_____" ], [ "df12=df11.drop('location',axis='columns')\ndf12.head()", "_____no_output_____" ], [ "df12.shape", "_____no_output_____" ] ], [ [ "#drop the dependent columns to train and test X", "_____no_output_____" ] ], [ [ "X=df12.drop(['price'],axis='columns')\nX.head()", "_____no_output_____" ], [ "y=df12.price\ny.head()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=10)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nlr_reg=LinearRegression()\nlr_reg.fit(X_train,y_train)\nlr_reg.score(X_test,y_test)", "_____no_output_____" ] ], [ [ "#Use K Fold cross validation to measure accuracy of our LinearRegression model", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import cross_val_score\ncv=ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\ncross_val_score(LinearRegression(),X,y,cv=cv)\n\n", "_____no_output_____" ] ], [ [ "#Find best model using GridSearchCV", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\n\nfrom sklearn.linear_model import Lasso\nfrom sklearn.tree import DecisionTreeRegressor\n\ndef find_best_model_using_gridsearchcv(X,y):\n algos = {\n 'linear_regression' : {\n 'model': LinearRegression(),\n 'params': {\n 'normalize': [True, False]\n }\n },\n 'lasso': {\n 'model': Lasso(),\n 'params': {\n 'alpha': [1,2],\n 'selection': ['random', 'cyclic']\n }\n },\n 'decision_tree': {\n 'model': DecisionTreeRegressor(),\n 'params': {\n 'criterion' : ['mse','friedman_mse'],\n 'splitter': ['best','random']\n }\n }\n }\n scores = []\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n for algo_name, config in algos.items():\n gs = GridSearchCV(config['model'], config['params'], cv=cv, return_train_score=False)\n gs.fit(X,y)\n scores.append({\n 'model': algo_name,\n 'best_score': gs.best_score_,\n 'best_params': gs.best_params_\n })\n\n return pd.DataFrame(scores,columns=['model','best_score','best_params'])\n\nfind_best_model_using_gridsearchcv(X,y)", "_____no_output_____" ], [ "def predict_price(location,sqft,bath,bhk): \n loc_index = np.where(X.columns==location)[0][0]\n\n x = np.zeros(len(X.columns))\n x[0] = sqft\n x[1] = bath\n x[2] = bhk\n if loc_index >= 0:\n x[loc_index] = 1\n\n return lr_reg.predict([x])[0]", "_____no_output_____" ], [ "predict_price('1st Phase JP Nagar',1128, 3, 3)", "_____no_output_____" ], [ "predict_price('Indira Nagar',1000, 3, 3)", "_____no_output_____" ] ], [ [ "#Export the tested model to a pickle file", "_____no_output_____" ] ], [ [ "import pickle\nwith open('banglore_home_prices_model.pickle','wb') as f:\n pickle.dump(lr_reg,f)", "_____no_output_____" ] ], [ [ "#Export location and column information to a file that will be useful later on in our prediction application", "_____no_output_____" ] ], [ [ "import json\ncolumns={\n 'data_columns':[col.lower() 
for col in X.columns]\n}\nwith open(\"columns.json\",\"w\") as f:\n f.write(json.dumps(columns))\n \n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d059cca0b7e69b79e1b49aaf209b98005e129aa2
4,931
ipynb
Jupyter Notebook
misc/notes/Databases/notes/Associations.ipynb
alittlebirdie00/coma
4670e4be2f687f48b9c75118788aa7f90b72b5d4
[ "MIT" ]
2
2018-09-16T21:39:24.000Z
2019-05-03T06:24:18.000Z
misc/notes/Databases/notes/Associations.ipynb
alittlebirdie00/coma
4670e4be2f687f48b9c75118788aa7f90b72b5d4
[ "MIT" ]
46
2018-08-28T13:38:04.000Z
2019-10-02T18:54:51.000Z
misc/notes/Databases/notes/Associations.ipynb
alittlebirdie00/coma
4670e4be2f687f48b9c75118788aa7f90b72b5d4
[ "MIT" ]
1
2018-08-30T11:08:31.000Z
2018-08-30T11:08:31.000Z
34.006897
187
0.592172
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d059e228e65c78e303e0a4a35bfbd86f20e654de
23,492
ipynb
Jupyter Notebook
site/en/tutorials/estimators/boosted_trees.ipynb
abviv/docs
4ab9d373f693ce804dc21529997de104e3d9e47c
[ "Apache-2.0" ]
4
2019-08-20T11:59:23.000Z
2020-01-12T13:42:50.000Z
site/en/tutorials/estimators/boosted_trees.ipynb
abviv/docs
4ab9d373f693ce804dc21529997de104e3d9e47c
[ "Apache-2.0" ]
null
null
null
site/en/tutorials/estimators/boosted_trees.ipynb
abviv/docs
4ab9d373f693ce804dc21529997de104e3d9e47c
[ "Apache-2.0" ]
1
2020-10-31T13:13:43.000Z
2020-10-31T13:13:43.000Z
33.56
583
0.536906
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "#How to train Boosted Trees models in TensorFlow", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/estimators/boosted_trees\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. 
It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.\n\nBoosted Trees models are popular with many machine learning practioners as they can achieve impressive performance with minimal hyperparameter tuning.", "_____no_output_____" ], [ "## Load the titanic dataset\nYou will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\ntf.logging.set_verbosity(tf.logging.ERROR)\ntf.set_random_seed(123)\n\n# Load dataset.\ndftrain = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_train.csv')\ndfeval = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_eval.csv')\ny_train = dftrain.pop('survived')\ny_eval = dfeval.pop('survived')", "_____no_output_____" ] ], [ [ "The dataset consists of a training set and an evaluation set:\n\n* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.\n* The model is tested against the *eval set*, `dfeval`, and `y_eval`.\n\nFor training you will use the following features:\n\n\n<table>\n <tr>\n <th>Feature Name</th>\n <th>Description</th>\n </tr>\n <tr>\n <td>sex</td>\n <td>Gender of passenger</td>\n </tr>\n <tr>\n <td>age</td>\n <td>Age of passenger</td>\n </tr>\n <tr>\n <td>n_siblings_spouses</td>\n <td># siblings and partners aboard</td>\n </tr>\n <tr>\n <td>parch</td>\n <td># of parents and children aboard</td>\n </tr>\n <tr>\n <td>fare</td>\n <td>Fare passenger paid.</td>\n </tr>\n <tr>\n <td>class</td>\n <td>Passenger's class on ship</td>\n </tr>\n <tr>\n <td>deck</td>\n <td>Which deck passenger was on</td>\n </tr>\n <tr>\n <td>embark_town</td>\n <td>Which town passenger embarked from</td>\n </tr>\n <tr>\n <td>alone</td>\n <td>If passenger was alone</td>\n </tr>\n</table>", "_____no_output_____" ], [ "## Explore the data", "_____no_output_____" ], [ "Let's first preview some of the data and create summary statistics on the training set.", "_____no_output_____" ] ], [ [ "dftrain.head()", "_____no_output_____" ], [ "dftrain.describe()", "_____no_output_____" ] ], [ [ "There are 627 and 264 examples in the training and evaluation sets, respectively.", "_____no_output_____" ] ], [ [ "dftrain.shape[0], dfeval.shape[0]", "_____no_output_____" ] ], [ [ "The majority of passengers are in their 20's and 30's.", "_____no_output_____" ] ], [ [ "dftrain.age.hist(bins=20)\nplt.show()", "_____no_output_____" ] ], [ [ "There are approximately twice as male passengers as female passengers aboard.", "_____no_output_____" ] ], [ [ "dftrain.sex.value_counts().plot(kind='barh')\nplt.show()", "_____no_output_____" ] ], [ [ "The majority of passengers were in the \"third\" class.", "_____no_output_____" ] ], [ [ "(dftrain['class']\n .value_counts()\n .plot(kind='barh'))\nplt.show()", "_____no_output_____" ] ], [ [ "Most passengers embarked from Southampton.", "_____no_output_____" ] ], [ [ "(dftrain['embark_town']\n .value_counts()\n .plot(kind='barh'))\nplt.show()", "_____no_output_____" ] ], [ [ "Females have a much higher chance of surviving vs. males. 
This will clearly be a predictive feature for the model.", "_____no_output_____" ] ], [ [ "ax = (pd.concat([dftrain, y_train], axis=1)\\\n .groupby('sex')\n .survived\n .mean()\n .plot(kind='barh'))\nax.set_xlabel('% survive')\nplt.show()", "_____no_output_____" ] ], [ [ "## Create feature columns and input functions\nThe Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):", "_____no_output_____" ] ], [ [ "fc = tf.feature_column\nCATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',\n 'embark_town', 'alone']\nNUMERIC_COLUMNS = ['age', 'fare']\n\ndef one_hot_cat_column(feature_name, vocab):\n return fc.indicator_column(\n fc.categorical_column_with_vocabulary_list(feature_name,\n vocab))\nfeature_columns = []\nfor feature_name in CATEGORICAL_COLUMNS:\n # Need to one-hot encode categorical features.\n vocabulary = dftrain[feature_name].unique()\n feature_columns.append(one_hot_cat_column(feature_name, vocabulary))\n\nfor feature_name in NUMERIC_COLUMNS:\n feature_columns.append(fc.numeric_column(feature_name,\n dtype=tf.float32))", "_____no_output_____" ] ], [ [ "You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:", "_____no_output_____" ] ], [ [ "example = dftrain.head(1)\nclass_fc = one_hot_cat_column('class', ('First', 'Second', 'Third'))\nprint('Feature value: \"{}\"'.format(example['class'].iloc[0]))\nprint('One-hot encoded: ', fc.input_layer(dict(example), [class_fc]).numpy())", "_____no_output_____" ] ], [ [ "Additionally, you can view all of the feature column transformations together:", "_____no_output_____" ] ], [ [ "fc.input_layer(dict(example), feature_columns).numpy()", "_____no_output_____" ] ], [ [ "Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. 
For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.", "_____no_output_____" ] ], [ [ "# Use entire batch since this is such a small dataset.\nNUM_EXAMPLES = len(y_train)\n\ndef make_input_fn(X, y, n_epochs=None, shuffle=True):\n y = np.expand_dims(y, axis=1)\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))\n if shuffle:\n dataset = dataset.shuffle(NUM_EXAMPLES)\n # For training, cycle thru dataset as many times as need (n_epochs=None).\n dataset = dataset.repeat(n_epochs)\n # In memory training doesn't use batching.\n dataset = dataset.batch(NUM_EXAMPLES)\n return dataset\n return input_fn\n\n# Training and evaluation input functions.\ntrain_input_fn = make_input_fn(dftrain, y_train)\neval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)", "_____no_output_____" ] ], [ [ "## Train and evaluate the model\n\nBelow you will do the following steps:\n\n1. Initialize the model, specifying the features and hyperparameters.\n2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.\n3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.\n\nBefore training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with simpler model to establish a benchmark.", "_____no_output_____" ] ], [ [ "linear_est = tf.estimator.LinearClassifier(feature_columns)\n\n# Train model.\nlinear_est.train(train_input_fn, max_steps=100)\n\n# Evaluation.\nresults = linear_est.evaluate(eval_input_fn)\nprint('Accuracy : ', results['accuracy'])\nprint('Dummy model: ', results['accuracy_baseline'])", "_____no_output_____" ] ], [ [ "Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported, along with using any twice differentiable custom loss (`BoostedTreesEstimator`). Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.\n\n\n", "_____no_output_____" ] ], [ [ "# Since data fits into memory, use entire dataset per layer. It will be faster.\n# Above one batch is defined as the entire dataset.\nn_batches = 1\nest = tf.estimator.BoostedTreesClassifier(feature_columns,\n n_batches_per_layer=n_batches)\n\n# The model will stop training once the specified number of trees is built, not\n# based on the number of steps.\nest.train(train_input_fn, max_steps=100)\n\n# Eval.\nresults = est.evaluate(eval_input_fn)\nprint('Accuracy : ', results['accuracy'])\nprint('Dummy model: ', results['accuracy_baseline'])", "_____no_output_____" ] ], [ [ "For performance reasons, when your data fits in memory, it is recommended to use the `boosted_trees_classifier_train_in_memory` function. 
However if training time is not of a concern or if you have a very large dataset and want to do distributed training, use the `tf.estimator.BoostedTrees` API shown above.\n\n\nWhen using this method, you should not batch your input data, as the method operates on the entire dataset.\n", "_____no_output_____" ] ], [ [ "def make_inmemory_train_input_fn(X, y):\n y = np.expand_dims(y, axis=1)\n def input_fn():\n return dict(X), y\n return input_fn\n\n\ntrain_input_fn = make_inmemory_train_input_fn(dftrain, y_train)\neval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)\nest = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(\n train_input_fn,\n feature_columns)\nprint(est.evaluate(eval_input_fn)['accuracy'])", "_____no_output_____" ] ], [ [ "Now you can use the train model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` is defined using the entire evaluation set.", "_____no_output_____" ] ], [ [ "pred_dicts = list(est.predict(eval_input_fn))\nprobs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])\n\nprobs.plot(kind='hist', bins=20, title='predicted probabilities')\nplt.show()", "_____no_output_____" ] ], [ [ "Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import roc_curve\n\nfpr, tpr, _ = roc_curve(y_eval, probs)\nplt.plot(fpr, tpr)\nplt.title('ROC curve')\nplt.xlabel('false positive rate')\nplt.ylabel('true positive rate')\nplt.xlim(0,)\nplt.ylim(0,)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d059e926205dcd82c6d737b2c69d8be28bcd92c3
257,300
ipynb
Jupyter Notebook
intro-neural-networks/gradient-descent/GradientDescent.ipynb
tobias-fyi/deep-learning-v2-pytorch
92889c80a89ee7eef13cfe044aa187817e8fb2f0
[ "MIT" ]
null
null
null
intro-neural-networks/gradient-descent/GradientDescent.ipynb
tobias-fyi/deep-learning-v2-pytorch
92889c80a89ee7eef13cfe044aa187817e8fb2f0
[ "MIT" ]
null
null
null
intro-neural-networks/gradient-descent/GradientDescent.ipynb
tobias-fyi/deep-learning-v2-pytorch
92889c80a89ee7eef13cfe044aa187817e8fb2f0
[ "MIT" ]
null
null
null
928.880866
104,079
0.801601
[ [ [ "# Implementing the Gradient Descent Algorithm\n\nIn this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# Some helper functions for plotting and drawing lines\n\ndef plot_points(X, y):\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')\n plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')\n\ndef display(m, b, color='g--'):\n plt.xlim(-0.05,1.05)\n plt.ylim(-0.05,1.05)\n x = np.arange(-10, 10, 0.1)\n plt.plot(x, m*x+b, color)", "_____no_output_____" ] ], [ [ "## Reading and plotting the data", "_____no_output_____" ] ], [ [ "data = pd.read_csv('data.csv', header=None)\nX = np.array(data[[0,1]])\ny = np.array(data[2])\nplot_points(X,y)\nplt.show()", "_____no_output_____" ] ], [ [ "## TODO: Implementing the basic functions\nHere is your turn to shine. Implement the following formulas, as explained in the text.\n- Sigmoid activation function\n\n$$\\sigma(x) = \\frac{1}{1+e^{-x}}$$\n\n- Output (prediction) formula\n\n$$\\hat{y} = \\sigma(w_1 x_1 + w_2 x_2 + b)$$\n\n- Error function\n\n$$Error(y, \\hat{y}) = - y \\log(\\hat{y}) - (1-y) \\log(1-\\hat{y})$$\n\n- The function that updates the weights\n\n$$ w_i \\longrightarrow w_i + \\alpha (y - \\hat{y}) x_i$$\n\n$$ b \\longrightarrow b + \\alpha (y - \\hat{y})$$", "_____no_output_____" ] ], [ [ "# Implement the following functions\n\n# Activation (sigmoid) function\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n# Output (prediction) formula\ndef output_formula(features, weights, bias):\n return sigmoid(np.dot(features, weights) + bias)\n\n# Error (log-loss) formula\ndef error_formula(y, output):\n return -(y * np.log(output)) - ((1 - y) * np.log(1 - output))\n\n# Gradient descent step\ndef update_weights(x, y, weights, bias, learnrate):\n yhat = output_formula(x, weights, bias)\n d_error = y - yhat\n weights += learnrate * d_error * x\n bias += learnrate * d_error\n return weights, bias", "_____no_output_____" ] ], [ [ "## Training function\nThis function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. 
It will also plot the data, and some of the boundary lines obtained as we run the algorithm.", "_____no_output_____" ] ], [ [ "np.random.seed(44)\n\nepochs = 100\nlearnrate = 0.01\n\ndef train(features, targets, epochs, learnrate, graph_lines=False):\n \n errors = []\n n_records, n_features = features.shape\n last_loss = None\n weights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n bias = 0\n for e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features, targets):\n output = output_formula(x, weights, bias)\n error = error_formula(y, output)\n weights, bias = update_weights(x, y, weights, bias, learnrate)\n \n # Printing out the log-loss error on the training set\n out = output_formula(features, weights, bias)\n loss = np.mean(error_formula(targets, out))\n errors.append(loss)\n if e % (epochs / 10) == 0:\n print(\"\\n========== Epoch\", e,\"==========\")\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n predictions = out > 0.5\n accuracy = np.mean(predictions == targets)\n print(\"Accuracy: \", accuracy)\n if graph_lines and e % (epochs / 100) == 0:\n display(-weights[0]/weights[1], -bias/weights[1])\n \n\n # Plotting the solution boundary\n plt.title(\"Solution boundary\")\n display(-weights[0]/weights[1], -bias/weights[1], 'black')\n\n # Plotting the data\n plot_points(features, targets)\n plt.show()\n\n # Plotting the error\n plt.title(\"Error Plot\")\n plt.xlabel('Number of epochs')\n plt.ylabel('Error')\n plt.plot(errors)\n plt.show()", "_____no_output_____" ] ], [ [ "## Time to train the algorithm!\nWhen we run the function, we'll obtain the following:\n- 10 updates with the current training loss and accuracy\n- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.\n- A plot of the error function. Notice how it decreases as we go through more epochs.", "_____no_output_____" ] ], [ [ "train(X, y, epochs, learnrate, True)", "\n========== Epoch 0 ==========\nTrain loss: 0.7135845195381634\nAccuracy: 0.4\n\n========== Epoch 10 ==========\nTrain loss: 0.6225835210454962\nAccuracy: 0.59\n\n========== Epoch 20 ==========\nTrain loss: 0.5548744083669508\nAccuracy: 0.74\n\n========== Epoch 30 ==========\nTrain loss: 0.501606141872473\nAccuracy: 0.84\n\n========== Epoch 40 ==========\nTrain loss: 0.4593334641861401\nAccuracy: 0.86\n\n========== Epoch 50 ==========\nTrain loss: 0.42525543433469976\nAccuracy: 0.93\n\n========== Epoch 60 ==========\nTrain loss: 0.3973461571671399\nAccuracy: 0.93\n\n========== Epoch 70 ==========\nTrain loss: 0.3741469765239074\nAccuracy: 0.93\n\n========== Epoch 80 ==========\nTrain loss: 0.35459973368161973\nAccuracy: 0.94\n\n========== Epoch 90 ==========\nTrain loss: 0.3379273658879921\nAccuracy: 0.94\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d059ecc344a2016634e45f04a3e3a567f72b983e
8,149
ipynb
Jupyter Notebook
tutorials/ImageCollection/01_image_collection_overview.ipynb
Yisheng-Li/geemap
0594917a4acedfebb85879cfe2bcb6a406a55f39
[ "MIT" ]
1,894
2020-03-10T04:44:09.000Z
2022-03-31T08:19:15.000Z
tutorials/ImageCollection/01_image_collection_overview.ipynb
Yisheng-Li/geemap
0594917a4acedfebb85879cfe2bcb6a406a55f39
[ "MIT" ]
398
2020-03-19T14:04:21.000Z
2022-03-31T15:48:04.000Z
tutorials/ImageCollection/01_image_collection_overview.ipynb
Yisheng-Li/geemap
0594917a4acedfebb85879cfe2bcb6a406a55f39
[ "MIT" ]
759
2020-03-17T21:58:53.000Z
2022-03-29T13:12:39.000Z
40.341584
1,027
0.64781
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_parent\" href=\"https://github.com/giswqs/geemap/tree/master/tutorials/ImageCollection/01_image_collection_overview.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_parent\" href=\"https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/tutorials/ImageCollection/01_image_collection_overview.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_parent\" href=\"https://colab.research.google.com/github/giswqs/geemap/blob/master/tutorials/ImageCollection/01_image_collection_overview.ipynb\"><img width=26px src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "# ImageCollection Overview\nAn `ImageCollection` is a stack or time series of images. In addition to loading an `ImageCollection` using an Earth Engine collection ID, Earth Engine has methods to create image collections. The constructor `ee.ImageCollection()` or the convenience method `ee.ImageCollection.fromImages()` create image collections from lists of images. You can also create new image collections by merging existing collections. For example:", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. 
Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.foliumap as emap\nexcept:\n import geemap as emap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ", "_____no_output_____" ] ], [ [ "Map = emap.Map(center=[40,-100], zoom=4)\nMap.add_basemap('ROADMAP') # Add Google Map\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Create arbitrary constant images.\nconstant1 = ee.Image(1)\nconstant2 = ee.Image(2)\n\n# Create a collection by giving a list to the constructor.\ncollectionFromConstructor = ee.ImageCollection([constant1, constant2])\nprint('collectionFromConstructor: ', collectionFromConstructor.getInfo())", "_____no_output_____" ], [ "# Create a collection with fromImages().\ncollectionFromImages = ee.ImageCollection.fromImages(\n [ee.Image(3), ee.Image(4)])\nprint('collectionFromImages: ', collectionFromImages.getInfo())", "_____no_output_____" ], [ "# Merge two collections.\nmergedCollection = collectionFromConstructor.merge(collectionFromImages)\nprint('mergedCollection: ', mergedCollection.getInfo())", "_____no_output_____" ], [ "# Create an ee.Geometry.\npolygon = ee.Geometry.Polygon([\n [[-35, -10], [35, -10], [35, 10], [-35, 10], [-35, -10]]\n])\n\n# Create a toy FeatureCollection\nfeatures = ee.FeatureCollection(\n [ee.Feature(polygon, {'foo': 1}), ee.Feature(polygon, {'foo': 2})])\n\nprint(features.getInfo())\n\n# Create an ImageCollection from the FeatureCollection\n# by mapping a function over the FeatureCollection.\nimages = features.map(lambda feature: ee.Image(ee.Number(feature.get('foo'))))\n\n# Print the resultant collection.\nprint('Image collection: ', images.getInfo())", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl()\nMap", "_____no_output_____" ] ], [ [ "Note that in this example an `ImageCollection` is created by mapping a function that returns an `Image` over a `FeatureCollection`. Learn more about mapping in the [Mapping over an ImageCollection section](https://developers.google.com/earth-engine/ic_mapping.html). Learn more about feature collections from the [FeatureCollection section](https://developers.google.com/earth-engine/feature_collections.html).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d059ed92dea4c7828a10e54a68b316cfe2370a77
433,800
ipynb
Jupyter Notebook
tutorials/paper_implementation.ipynb
KennethEnevoldsen/siptom
0535f4c7d8351d45aa5749471a7f9333384facc7
[ "Apache-2.0" ]
1
2019-08-14T11:42:53.000Z
2019-08-14T11:42:53.000Z
tutorials/paper_implementation.ipynb
KennethEnevoldsen/siptom
0535f4c7d8351d45aa5749471a7f9333384facc7
[ "Apache-2.0" ]
null
null
null
tutorials/paper_implementation.ipynb
KennethEnevoldsen/siptom
0535f4c7d8351d45aa5749471a7f9333384facc7
[ "Apache-2.0" ]
null
null
null
441.751527
68,494
0.937153
[ [ [ "# Regarding this Notebook\nThis is a replication of the original analysis performed in the paper by [Waade & Enevoldsen 2020](missing). This replication script will not be updated as it is intended for reproducibility. Any deviations from the paper is marked with bold for transparency.\nFootnotes and internal documentation references are removed from this example to avoid confusion.\n\n---\n", "_____no_output_____" ], [ "# 2.2 Using tomsup\nOne of the advantages of computational models of cognitive processes is that the implications of the model can be worked out by simulating the model’s behavior in a variety of situations. tomsup in particular, allows to test the k-ToM model as it plays a wide set of game-theoretical situations (e.g. Matching Pennies or Prisoner’s Dilemma), in interaction with a variety of different agents (e.g. other k-ToM or less sophisticated agents), within different possible settings (e.g. repeated interactions with the same opponent, or round robin tournaments). In order to better understand the setup of the tomsup package, we start with the case of two simple agents interacting, followed by a simple exampleusing k-ToM agents, which will also illustrate how one might implement tomsup in an experiment. Lastly, we will show how to run a simulation using multiple agents as well as how to plot the evolving internal states of a k-ToM agent. In this simple scenario two agents are playing the Matching Pennies game. One agent hides a penny in one hand: let’s say chooses 0 for hiding in the left hand, and 1 in the right. The other agent has to guess where the penny is. If the second agent guesses (chooses the same hand as the first), it wins and the first loses. In other words, the first agent wants to choose the hand that the second will not choose and the second wants to choose the hand that the first chooses. In this example, one of the agents implements the Random Bias strategy (e.g. has a 60 percent probability of choosing right over left), while the other implements a classic Q-learning strategy (a model free reinforcement learning mechanism updating the expected reward of choosing a specific option on a trial by trial basis). The full list of strategies already implemented in tomsup is accessible using the function `valid_agents()`. The user first has to install the tomsup package developed using python 3.6 (Van Rossum & Drake, 2009). The package can be downloaded and installed using pip:\n\n```pip3 install tomsup```", "_____no_output_____" ], [ "**However, in this notebook we will assume the user simply downloaded the git. Feel free to skip the next code chunk if that is not the case.**", "_____no_output_____" ] ], [ [ "# assuming you are in the github folder change the path - not relevant if tomsup is installed via. pip\nimport os\n\nos.chdir(\"..\") # go out of the tutorials folder", "_____no_output_____" ] ], [ [ "Both approaches will also install the required dependencies. Now tomsup can be imported into Python following the lines;", "_____no_output_____" ] ], [ [ "import tomsup as ts", "_____no_output_____" ] ], [ [ "We will also set a arbitrary seed for to ensure reproducibility;", "_____no_output_____" ] ], [ [ "import random\nimport numpy as np\n\nnp.random.seed(1995)\nrandom.seed(1995) # The year of birth of the first author", "_____no_output_____" ] ], [ [ "First we need to set up the Matching Pennies game. 
As different games are defined by different payoff matrices, we set up the game by creating the appropriate payoff matrix using the ```PayoffMatrix``` class. ", "_____no_output_____" ] ], [ [ "# initiate the competitive matching pennies game\npenny = ts.PayoffMatrix(name=\"penny_competitive\")\n\n# print the payoff matrix\nprint(penny)", "<Class PayoffMatrix, Name = penny_competitive> \nThe payoff matrix of agent 0\n | Choice agent 1\n | | 0 | 1 |\n | ------------ |\nChoice | 0 | -1 | 1 |\nagent 0| 1 | 1 | -1 |\n \nThe payoff matrix of agent 1\n | Choice agent 1\n | | 0 | 1 |\n | ------------ |\nChoice | 0 | 1 | -1 |\nagent 0| 1 | -1 | 1 |\n \n" ] ], [ [ "\nThe Matching Pennies game is a zero sum game, meaning that for one agent to get a reward, the opponent has to lose. Agents have thus to predict their opponents' behavior, which is ideal for investigating \\gls{tom}. Note that to explore other payoff matrices included in the package, or to learn how to specify a custom payoff matrix, the user can type the `help(ts.PayoffMatrix)` command.\n\nThen we create the first of the two competing agents:\n", "_____no_output_____" ] ], [ [ "# define the random bias agent, which chooses 1 70 percent of the time, and call the agent \"jung\"\njung = ts.RB(bias=0.7)\n\n# Examine Agent\nprint(f\"jung is a class of type: {type(jung)}\")\nif isinstance(jung, ts.Agent):\n print(f\"but jung is also an instance of the parent class ts.Agent\")\n\n# let us have Jung make a choice\nchoice = jung.compete()\n\nprint(f\"jung chose {choice} and its probability for choosing 1 was {jung.get_bias()}.\")", "jung is a class of type: <class 'tomsup.agent.RB'>\nbut jung is also an instance of the parent class ts.Agent\njung chose 1 and his probability for choosing 1 was 0.7.\n" ] ], [ [ "\nNote that it is possible to create one or more agents simultaneously using the convenient `create\\_agents()` and passing any starting parameters to it in the form of a dictionary. ", "_____no_output_____" ] ], [ [ "# create a reinforcement learning agent\nskinner = ts.create_agents(agents=\"QL\", start_params={\"save_history\": True})", "_____no_output_____" ] ], [ [ "\nNow that both agents are created, we have them play against each other. \n", "_____no_output_____" ] ], [ [ "# have the agents compete for 30 rounds\nresults = ts.compete(jung, skinner, p_matrix=penny, n_rounds=30)\n\n# examine results\nprint(results.head()) # inspect the first 5 rows of the dataframe", " round choice_agent0 choice_agent1 payoff_agent0 payoff_agent1\n0 0 1 0 1 -1\n1 1 1 1 -1 1\n2 2 0 1 1 -1\n3 3 1 1 -1 1\n4 4 0 1 1 -1\n" ] ], [ [ "** Note: you can remove the print() to get a nicer printout of the dataframe **", "_____no_output_____" ] ], [ [ "results.head() # inspect the first 5 rows of the dataframe", "_____no_output_____" ] ], [ [ "\nThe data frame stores the choice of each agent as well as their resulting payoff. Simply summing the payoff columns would determine the winner.\n\n\n## k-ToM\nHere we will present some simple examples of the k-ToM agent. For a more in-depth description we recommend checking the expanded introduction on the [Github repository](https://github.com/KennethEnevoldsen/tomsup/blob/master/tutorials/introduction_to_tom.ipynb).\n\nWe will start of by creating a 0-ToM with default priors and `save_history=True` to examine the workings of it. 
Notice that setting `save_history` is turned off by default to save on memory which is especially problematic for ToM agents with high sophistication level.", "_____no_output_____" ] ], [ [ "# Creating a simple 1-ToM with default parameters\ntom_1 = ts.TOM(level=1, dilution=None, save_history=True)\n\n# Extract the parameters\ntom_1.print_parameters()", "volatility (log scale): -2\nb_temp (log odds): -1\nbias: 0\n" ] ], [ [ "\nNote that k-ToM agents as default uses agnostic starting beliefs. These can be shown in detail and specified as desired, as shown in **appendix in the paper**.\n\nTo increase the agent's tendency to choose one we could simply increase its bias. Similarly, if we want the agent to behave in a more more deterministic fashion we can decrease the behavioural temperature. When the parameter values are set, we can play the agent against an opponent using the `.compete()` method. Where `agent` denote the agent in the payoff matrix (0 or 1) and the `op_choice` denote the choice of the opponent during the previous round. ", "_____no_output_____" ] ], [ [ "tom_2 = ts.TOM(\n level=2,\n volatility=-2,\n b_temp=-2, # more deterministic\n bias=0,\n dilution=None,\n save_history=True,\n)\nchoice = tom_2.compete(p_matrix=penny, agent=0, op_choice=None)\nprint(\"tom_2 chose:\", choice)", "tom_2 choose: 0\n" ] ], [ [ "The user is recommended to have the 1-ToM and the 2-ToM agents compete using the previously presented `ts.compete()` function for simplicity. However, to make the process more transparent for the user in the following we create a simple for-loop:\n", "_____no_output_____" ] ], [ [ "tom_2.reset() # reset before start\n\n\nprev_choice_1tom = None\nprev_choice_2tom = None\nfor trial in range(1, 4):\n # note that op_choice is choice on previous turn\n # and that agent is the agent you respond to in the payoff matrix\n choice_1 = tom_1.compete(p_matrix=penny, agent=0, op_choice=prev_choice_1tom)\n choice_2 = tom_2.compete(p_matrix=penny, agent=1, op_choice=prev_choice_2tom)\n\n # update previous choice\n prev_choice_1tom = choice_1\n prev_choice_2tom = choice_2\n\n print(\n f\"Round {trial}\",\n f\" 1-ToM choose {choice_1}\",\n f\" 2-ToM choose {choice_2}\",\n sep=\"\\n\",\n )", "Round 1\n 1-ToM choose 0\n 2-ToM choose 0\nRound 2\n 1-ToM choose 1\n 2-ToM choose 1\nRound 3\n 1-ToM choose 1\n 2-ToM choose 0\n" ] ], [ [ "A for loop like this can be used to implement k-ToM in an experimental setting by replacing the agent with the behavior of a participant. Examples of such implementations (interfacing with PsychoPy are available in the [documentation](https://github.com/KennethEnevoldsen/tomsup/tree/master/tutorials/psychopy_experiment)). \n", "_____no_output_____" ] ], [ [ "tom_2.print_internal(\n keys=[\"p_k\", \"p_op\"], level=[0, 1] # print these two states\n) # for the agent simulated opponents 0-ToM and 1-ToM", "opponent_states\n| 0-ToM\n| | opponent_states\n| | own_states\n| 1-ToM\n| | opponent_states\n| | | 0-ToM\n| | | | opponent_states\n| | | | own_states\n| | own_states\n| | | p_k (probability): [1.0]\nown_states\n| p_k (probability): [0.4828785372717201, 0.5171214627282799]\n| p_op (probability): 0.42603436862106425\n" ] ], [ [ "\nFor instance, we can note that the estimate of the opponent's sophistication level (\\texttt{p\\_k}) slightly favors a 1-ToM as opposed to a 0-ToM and that the average probability of the opponent choosing one (`p_op`) slightly favors 1 (which was indeed the option the opponent chose). 
These estimates are quite uncertain due to the few rounds played. More information on how to interpret the internal states of the ToM agent is available in the documentation of the package, e.g. by using the help function `help(tom_2.print_internal)`\n\n## Multiple Agents and Visualizing Results\nThe above syntax is useful for small setups. However, the user might want to build larger simulations involving several agents to simulate data for experimental setup or test underlying assumptions. The package provides syntax for quickly iterating over multiple agents, rounds and even simulations. We will here show a quick example along with how to visualize the results and internal states of ToM agents.", "_____no_output_____" ] ], [ [ "# Create a list of agents\nagents = [\"RB\", \"QL\", \"WSLS\", \"1-TOM\", \"2-TOM\"]\n# And set their starting parameters. An empty dictionary denotes default values\nstart_params = [{\"bias\": 0.7}, {\"learning_rate\": 0.5}, {}, {}, {}]\n\ngroup = ts.create_agents(agents, start_params) # create a group of agents\n\n# Specify the environment\n# round_robin e.g. each agent will play against all other agents\ngroup.set_env(env=\"round_robin\")\n\n# Finally, we make the group compete 20 simulations of 30 rounds\nresults = group.compete(p_matrix=penny, n_rounds=30, n_sim=20, save_history=True)", "_____no_output_____" ] ], [ [ "Following the simulation, a data frame can be extracted as before, with additional columns reporting simulation number, competing agent pair (`agent0` and `agent1`) and if `save_history=True` it will also add two columns denoting the internal states of each agent, e.g. estimates and expectations at each trial.\n", "_____no_output_____" ] ], [ [ "res = group.get_results()\nprint(res.head(1)) # print the first row", " n_sim round choice_agent0 choice_agent1 payoff_agent0 payoff_agent1 \\\n0 0 0 1 1 -1 1 \n\n history_agent0 history_agent1 agent0 \\\n0 {'choice': 1} {'choice': 1, 'expected_value0': 0.5, 'expecte... RB \n\n agent1 \n0 QL \n" ] ], [ [ "**Again, removing the print statement gives you a more readable output**", "_____no_output_____" ] ], [ [ "res.head(1)", "_____no_output_____" ] ], [ [ "** to allow other authors to examine these results we have also saved the results to a new lines delimited .ndjson**", "_____no_output_____" ] ], [ [ "res.to_json(\"tutorials/paper.ndjson\", orient=\"records\", lines=True)", "_____no_output_____" ] ], [ [ "The package also provides convenient functions for plotting the agent's choices and performance.\n", "_____no_output_____" ], [ "> for nicer plots we will increase the figure size using the following code. 
This is excluded from the paper for simplicity", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\n# Set figure size\nplt.rcParams[\"figure.figsize\"] = [10, 10]", "_____no_output_____" ], [ "# plot a heatmap of the rewards for all agents in the tournament\ngroup.plot_heatmap(cmap=\"RdBu_r\")\n\nplt.rcParams[\"figure.figsize\"] = [5, 5]\n\n# plot the choices of the 1-ToM agent when competing against the WSLS agent\ngroup.plot_choice(agent0=\"WSLS\", agent1=\"1-TOM\", agent=1)\n# plot the choices of the 1-ToM agent when competing against the RB agent\ngroup.plot_choice(agent0=\"RB\", agent1=\"1-TOM\", agent=1)\n\n# plot the score of the 1-ToM agent when competing against the WSLS agent\ngroup.plot_score(agent0=\"WSLS\", agent1=\"1-TOM\", agent=1)\n# plot the score of the 2-ToM agent when competing against the WSLS agent\ngroup.plot_score(agent0=\"WSLS\", agent1=\"2-TOM\", agent=1)", "_____no_output_____" ] ], [ [ "As seen in the heatmap, the k-ToM model compares favorably against simpler agents\nsuch as QL. Furthermore, notice that the 1-ToM and 2-ToM compare especially favorably\nagainst the WSLS agent, as this agent acts as a deterministic 0-ToM. Similarly, we see that the\n2-ToM agent incurs a cost for being more complex by being less able to take advantage of the\ndeterministic nature of WSLS. We can examine this further in the figures, where we see that the\n1-ToM is almost perfectly able to predict the behaviour of the WSLS agent after turn 5\nacross simulations, while the 2-ToM takes longer to estimate the behaviour. The figures also show\nthat the 1-ToM displays different behavioural patterns against different opponents: when playing against an RB agent it shows\na bias-estimation behaviour, while when playing against the WSLS it shows an oscillating\nchoice pattern. Ultimately these plots are meant for initial investigation, and more elaborate plots\ncan be constructed from the results data frame.\n\n> here we just refer to the figures, for more exact references please see the paper", "_____no_output_____" ], [ "Besides these general plots, the package also contains a series of shortcuts for plotting $k$-ToM's internal states, such as its estimate of its opponent's sophistication level. Here it is seen that the 2-ToM correctly estimates its opponent as having a sophistication level of 1 on average.\n", "_____no_output_____" ] ], [ [ "# plot 2-ToM estimate of its opponent's sophistication level\ngroup.plot_p_k(agent0=\"1-TOM\", agent1=\"2-TOM\", agent=1, level=0)\ngroup.plot_p_k(agent0=\"1-TOM\", agent1=\"2-TOM\", agent=1, level=1)", "_____no_output_____" ] ], [ [ "\nIt is also easy to plot k-ToM's estimates of its opponent's model parameters. As an example, the following code plots the 2-ToM's estimate of 1-ToM's volatility and bias. 
We see that the ToM agent approaches a correct estimate of the default volatility of -2 and correctly estimates its opponent as having no inherent bias.\n", "_____no_output_____" ] ], [ [ "# plot 2-ToM estimate of its opponent's volatility while believing the opponent to be level 1.\ngroup.plot_tom_op_estimate(\n agent0=\"1-TOM\", agent1=\"2-TOM\", agent=1, estimate=\"volatility\", level=1, plot=\"mean\"\n)\n\n# plot 2-ToM estimate of its opponent's bias while believing the opponent to be level 1.\ngroup.plot_tom_op_estimate(\n agent0=\"1-TOM\", agent1=\"2-TOM\", agent=1, estimate=\"bias\", level=1, plot=\"mean\"\n)", "_____no_output_____" ] ], [ [ "\nUse `help(ts.AgentGroup.plot_tom_op_estimate)` for information on how to plot the other estimated parameters or k-ToM's uncertainty in these parameters. \nAdditional information can be found in the history column in the results data frame, if needed. This includes all of k-ToM's internal states (the changing variables in the model), which for example include choice probability, gradient, and estimate uncertainties, as well as k-ToM's estimates of its opponent's internal states. Documentation, examples and further tutorials can be found on the Github repository, which also includes a more in-depth description of the dynamics of **the k-ToM model implementation**.", "_____no_output_____" ], [ "---\n## Are you left with any questions?\nFeel free to open a github issue with questions and/or bug reports.\n\nBest, \n\n*Enevoldsen and Waade*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d059f33cb8dfeed6ff7799b036b0f96b87ec54c4
77,092
ipynb
Jupyter Notebook
Roberto_Zerbini's_Blog_Polynomial_Regression.ipynb
robertozerbini/blog
efda285dcdf3f06feca14eab7e09cca2022ae245
[ "MIT" ]
null
null
null
Roberto_Zerbini's_Blog_Polynomial_Regression.ipynb
robertozerbini/blog
efda285dcdf3f06feca14eab7e09cca2022ae245
[ "MIT" ]
null
null
null
Roberto_Zerbini's_Blog_Polynomial_Regression.ipynb
robertozerbini/blog
efda285dcdf3f06feca14eab7e09cca2022ae245
[ "MIT" ]
null
null
null
157.652352
17,002
0.881466
[ [ [ "<a href=\"https://colab.research.google.com/github/robertozerbini/blog/blob/add-license-1/Roberto_Zerbini's_Blog_Polynomial_Regression.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math \nimport sklearn \n\naxes = [-2.5, 2.5, -3.5, 3.5]", "_____no_output_____" ] ], [ [ "#Get Data", "_____no_output_____" ] ], [ [ "#generate data\ndef get_data(m):\n np.random.seed(3)\n X = np.random.randn(m, 1)\n y = np.sin(3 * X) + np.random.randn(m, 1) *.5\n return X,y", "_____no_output_____" ], [ "X,y = get_data(500)", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nplt.title('Raw Data')\nax.plot(X, y, \"b.\") \nax.axis(axes)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"y\")\nplt.show()", "_____no_output_____" ] ], [ [ "#Train Linear Model", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression() \nlin_reg.fit(X_train, y_train)", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error \n\ny_test_prediction = lin_reg.predict(X_test)\nlin_mse_test = mean_squared_error(y_test, y_test_prediction)\n\nlin_mse_test", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nplt.title('Prediction Linear Model')\nplt.axis(axes)\n\nax.plot(X_test, y_test_prediction, \"r*\", label = 'Prediction') \nax.plot(X_test, y_test, \"b.\", label = 'Test Data') \nax.axis(axes)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"y\")\n\nplt.legend(loc=\"upper left\", fontsize=8)\nplt.show()", "_____no_output_____" ] ], [ [ "#Train Polynomial Model", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import PolynomialFeatures\n\ndegree = 8\npl = PolynomialFeatures(degree = degree)\n\nX_train_p = pl.fit_transform(X_train)\nX_test_p = pl.fit_transform(X_test)", "_____no_output_____" ], [ "print('Number of features X = {} Number of features X_poly = {}'.format(X_train.shape[1], X_train_p.shape[1]))", "Number of features X = 1 Number of features X_poly = 9\n" ], [ "poly_reg = LinearRegression() \npoly_reg.fit(X_train_p, y_train)", "_____no_output_____" ], [ "y_prediction_p = poly_reg.predict(X_train_p)\npoly_mse_train = mean_squared_error(y_train, y_prediction_p)", "_____no_output_____" ], [ "y_test_prediction_p = poly_reg.predict(X_test_p)\npoly_mse_test = mean_squared_error(y_test, y_test_prediction_p)", "_____no_output_____" ], [ "print(\"MSE linear: {} MSE polynomial: {} %difference {}\".format(lin_mse_test, poly_mse_test, 1- poly_mse_test / lin_mse_test))", "MSE linear: 0.6808472606336224 MSE polynomial: 0.3598539136348059 %difference 0.4714616119628474\n" ], [ "fig, ax = plt.subplots()\nplt.title('Prediction Polynomial Model (degree ' + str(degree) + ')')\nplt.axis(axes)\n\nax.plot(X_test, y_test_prediction_p, \"r*\", label = 'Prediction') \nax.plot(X_test, y_test, \"b.\", label = 'Test Data') \nax.axis(axes)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"y\")\n\nplt.legend(loc=\"upper left\", fontsize=8)\nplt.show()", "_____no_output_____" ] ], [ [ "#Variance Analysis", "_____no_output_____" ] ], [ [ "percent_diff = []\nmse_train = []\nmse_test = []\niter = 17\nfor d in range(iter):\n\n pl = PolynomialFeatures(degree = d)\n\n X_train_p = pl.fit_transform(X_train)\n X_test_p = 
pl.fit_transform(X_test)\n\n lin_reg = LinearRegression() \n lin_reg.fit(X_train_p, y_train)\n\n y_prediction_p = lin_reg.predict(X_train_p)\n lin_mse_train_p = mean_squared_error(y_train, y_prediction_p)\n\n y_test_prediction_p = lin_reg.predict(X_test_p)\n lin_mse_test_p = mean_squared_error(y_test, y_test_prediction_p)\n\n mse_train.append(lin_mse_train_p)\n mse_test.append(lin_mse_test_p) \n percent_diff.append(1- lin_mse_test_p / lin_mse_train_p)", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(np.linspace(1,iter,num=iter), mse_train, \"b-*\", label = 'MSE Train')\nax.plot(np.linspace(1,iter,num=iter), mse_test, \"y-*\", label = 'MSE Test') \nax.set_xlabel(\"Polinomial Degree\")\nax.set_ylabel(\"MSE\")\nplt.legend(loc=\"upper left\", fontsize=8)\nplt.title('Model Variance')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d059fe4ce7871a0288b02b9557cc3611abc1ad34
4,699
ipynb
Jupyter Notebook
workbooks/validation/causal_links.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
workbooks/validation/causal_links.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
workbooks/validation/causal_links.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
22.81068
116
0.462013
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d05a00f18adac3ad07a096130c34042bcaab668f
75,871
ipynb
Jupyter Notebook
report.ipynb
sajjad-ahmed/Udacity-DeepRL-Project-1-navigation
5653001ec5e914cc89e45cafbc4abfbac0b05d0c
[ "MIT" ]
null
null
null
report.ipynb
sajjad-ahmed/Udacity-DeepRL-Project-1-navigation
5653001ec5e914cc89e45cafbc4abfbac0b05d0c
[ "MIT" ]
4
2020-09-26T01:22:34.000Z
2021-08-25T16:09:50.000Z
report.ipynb
sajjad-ahmed/Udacity-DeepRL-Project-1-navigation
5653001ec5e914cc89e45cafbc4abfbac0b05d0c
[ "MIT" ]
1
2020-07-31T12:14:01.000Z
2020-07-31T12:14:01.000Z
722.580952
72,972
0.953513
[ [ [ "# Abstract", "_____no_output_____" ], [ "In this experiment, I have tried to implement results Deep Q-network as a part of Udacity navigation project. \nBased on the episodic iteration I try to analyse the performance.", "_____no_output_____" ], [ "# Overview", "_____no_output_____" ], [ "For this project, you will train an agent to navigate (and collect bananas!) in a large, square world.\n\nA reward of +1 is provided for collecting a yellow banana, and a reward of -1 is provided for collecting a blue banana. Thus, the goal of your agent is to collect as many yellow bananas as possible while avoiding blue bananas.\n\nThe state space has 37 dimensions and contains the agent's velocity, along with ray-based perception of objects around the agent's forward direction. Given this information, the agent has to learn how to best select actions. Four discrete actions are available, corresponding to:\n\n- 0 - move forward.\n- 1 - move backward.\n- 2 - turn left.\n- 3 - turn right.\n\nThe task is episodic, and in order to solve the environment, your agent must get an average score of +13 over 100 consecutive episodes.", "_____no_output_____" ], [ "# Output Analysis", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nfrom IPython.core.display import HTML \nImage(filename = \"img.png\")", "_____no_output_____" ] ], [ [ "As we can observe from the figure, the agent tries to achieve better result after 50th episode.\nBecause it tries to explore the environment. Then we can see increasing trend till 250th episode. After\nthat the agent the score remains a bit constant. Again this might be due to the exploration. At the end we can see the agent is scoring again.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d05a1a1ea1e77acacdd1df5d23f18d928739f295
134,247
ipynb
Jupyter Notebook
neural_collaborative_filtering/embedding.ipynb
sadrayan/vote-roll-call
3c19ef3213fcc10339159ae29f9d8d2fb5b4cb2a
[ "Apache-2.0" ]
null
null
null
neural_collaborative_filtering/embedding.ipynb
sadrayan/vote-roll-call
3c19ef3213fcc10339159ae29f9d8d2fb5b4cb2a
[ "Apache-2.0" ]
null
null
null
neural_collaborative_filtering/embedding.ipynb
sadrayan/vote-roll-call
3c19ef3213fcc10339159ae29f9d8d2fb5b4cb2a
[ "Apache-2.0" ]
null
null
null
39.345545
13,356
0.542157
[ [ [ "import keras\nfrom IPython.display import SVG\nfrom keras.optimizers import Adam\nfrom keras.utils.vis_utils import model_to_dot\nfrom tqdm import tqdm\n\nfrom keras import backend as K\n\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom numpy import array\nfrom numpy import asarray\nfrom numpy import zeros\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Embedding\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')\nimport seaborn as sns\n\n%matplotlib inline\nsns.set(style='whitegrid', palette='muted', font_scale=1.2)", "Using TensorFlow backend.\n" ], [ "df_bills = pd.read_csv('../data/bill_all.csv')\nprint(df_bills.columns)\ndf_bills.tail()", "Index(['Unnamed: 0', 'Unnamed: 0.1', 'Unnamed: 0.1.1', 'Unnamed: 0.1.1.1',\n 'index', 'Unnamed: 0.1.1.1.1', 'action_date', 'action_time', 'chamber',\n 'congress', 'legis_num', 'majority', 'name', 'party', 'role',\n 'rollcall_num', 'session', 'state', 'vote', 'vote_desc',\n 'vote_question', 'vote_result', 'vote_type', 'link', 'billText',\n 'sponsor', 'sponsor_id', 'sponsor_party', 'sponsor_state',\n 'sponsor_uri'],\n dtype='object')\n" ], [ "df_final = pd.read_csv('../data/df_vote_final.csv')\ndf_final.tail()", "_____no_output_____" ], [ "print(len(df_final.name.unique()))\nprint(len(df_final.sponsor_id.unique()))\ndf_final.columns\n", "1118\n841\n" ], [ "dataset = df_final[['name', 'legis_num', 'vote']]\ndataset['bill_id'] = dataset.legis_num.astype('category').cat.codes.values\ndataset['name_id'] = dataset.name.astype('category').cat.codes.values\ndataset['vote'] = dataset.vote.astype('category').cat.codes.values\n\n# dataset.drop(columns=['name', 'legis_num'], inplace=True)\ndataset = dataset.sample(frac=0.5, replace=True)\ndataset.reset_index(inplace=True)\ndataset.tail()", "_____no_output_____" ], [ "import gensim\nfrom gensim import utils\n\nword2vec_model = gensim.models.KeyedVectors.load_word2vec_format('/home/sonic/.keras/datasets/GoogleNews-vectors-negative300.bin',\n binary=True)\n", "_____no_output_____" ], [ "%%time\n\n\nimport nltk\n\n\nmax_words = 20000\nMAX_SEQUENCE_LENGTH = 1000\ndef process_doc(X):\n tokenizer = Tokenizer(num_words=max_words,lower=True, split=' ', \n filters='\"#%&()*+-/<=>@[\\\\]^_`{|}~\\t\\n',\n char_level=False, oov_token=u'<UNK>')\n\n X_text = X['billText'].values\n tokenizer.fit_on_texts(X_text)\n print(X.shape)\n \n X_seq = np.array(tokenizer.texts_to_sequences(X_text))\n X_seq = pad_sequences(X_seq, maxlen=MAX_SEQUENCE_LENGTH, padding='post')\n print('X_seq', X_seq.shape)\n\n count_vect = CountVectorizer()\n X_train_counts = count_vect.fit_transform(X_text)\n\n tf_transformer = TfidfTransformer().fit(X_train_counts)\n X_train_tf = tf_transformer.transform(X_train_counts)\n\n x_emb = {}\n # tokens = nltk.word_tokenize(list(X))\n# print('tokens.shape', tokens.shape)\n\n for idx, doc in tqdm(X.iterrows()): #look up each doc in model\n# print(doc['legis_num'], doc['billText'])\n x_emb[doc['legis_num']] = document_vector(word2vec_model, nltk.word_tokenize(doc['billText'].lower()))\n\n\n 
word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n\n\n return np.array(X_seq), word_index, x_emb, X_train_tf, X_train_counts\n\ndef document_vector(word2vec_model, doc):\n # remove out-of-vocabulary words\n doc = [word for word in doc if word in word2vec_model.vocab]\n return np.mean(word2vec_model[doc], axis=0)\n\n\ndef has_vector_representation(word2vec_model, doc):\n \"\"\"check if at least one word of the document is in the\n word2vec dictionary\"\"\"\n return not all(word not in word2vec_model.vocab for word in doc)\n\n\ndf_bills['billText'] = df_bills['billText'].apply(str)\nX_seq, word_index, X_emb, X_train_tf, X_train_counts = process_doc(df_bills)\n# df_bills['X_seq'] = X_seq\n# df_bills['X_emb'] = X_emb\n# df_bills['X_train_tf'] = X_train_tf\n# df_bills['X_train_counts'] = X_train_counts", "(4062, 30)\nX_seq (4062, 1000)\n" ], [ "# print(X_emb.shape)\nprint(X_emb['H R 5010'].shape)", "(300,)\n" ], [ "# print(X_emb.item()['H R 5010'])\n# print(X_emb.shape)\ndf_new = pd.DataFrame(X_emb)\n\n# df_new['legis_num'] = df_bills['legis_num']\n# df_new = df_new.drop_duplicates('legis_num', keep=False)\n# df_new.set_index('legis_num')\ndf_new.reset_index(inplace=True, drop=True)\n\ndf_new.tail()", "_____no_output_____" ], [ "len(dataset['name_id'].unique())", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\ntrain, test = train_test_split(dataset, test_size=0.2)\n\nprint()\nprint('train', train.shape)\nprint('test', test.shape)\ntrain.head()\n# y_train.head()", "\ntrain (2041478, 6)\ntest (510370, 6)\n" ], [ "n_users, n_bill = len(dataset.name_id.unique()), len(dataset.bill_id.unique())\n\nn_latent_factors = 50\n\nEMBEDDING_DIM = 100\n\nprint('number of legsitlators:', n_users)\nprint('number of bills', n_bill)", "number of legsitlators: 1117\nnumber of bills 4061\n" ], [ "def plot_history(history):\n# print(history.history)\n df = pd.DataFrame(history.history)\n print(df.describe())\n df.plot(xticks=range(epochs))\n# print(history.history.keys())\n", "_____no_output_____" ], [ "#plot data\nfig, ax = plt.subplots(figsize=(15,7))\n# print(dataset.groupby(['name'])['legis_num'].count())\nprint()\ndataset.groupby(['name_id'])['legis_num'].count().plot(kind='hist', bins=100, alpha=0.5)\n# dataset.groupby(['name']).count()['legis_num'].plot(ax=ax, kind='hist', bins=100, alpha=0.5)\nplt.show()\n# print(dataset.groupby(['legis_num'])['name_id'].count())\n# dataset.groupby(['legis_num'])['name_id'].count().plot(kind='hist', bins=10, alpha=0.5)", "\n" ], [ "! cd /home/sonic/.keras/datasets/glove.6B.100d.txt", "/bin/sh: line 0: cd: /home/sonic/.keras/datasets/glove.6B.100d.txt: Not a directory\r\n" ], [ "# load the whole embedding into memory\nembeddings_index = dict()\nf = open('/home/sonic/.keras/datasets/glove.6B.100d.txt')\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\nprint('Loaded %s word vectors.' 
% len(embeddings_index))\n", "Loaded 400000 word vectors.\n" ], [ "vocab_size = len(word_index) + 1\nprint(len(word_index))\n\n# create a weight matrix for words in training docs\nembedding_matrix = zeros((vocab_size, EMBEDDING_DIM))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n", "97804\n" ], [ "# TODO\n# Per congrss\n", "_____no_output_____" ], [ "%%time\n\n\n# KRAFT\n\n\nprint(len(word_index))\n\n\ndef getKraftEmbeddingModel():\n # define the model\n model = Sequential()\n model.add(Embedding(vocab_size, 100,\n weights=[embedding_matrix],\n trainable=True,\n input_length=MAX_SEQUENCE_LENGTH))\n print('before flatten', model.output_shape)\n model.add(Flatten())\n print('after flatten', model.output_shape)\n model.add(Dense(1, activation='sigmoid'))\n print('dense shape', model.output_shape)\n # compile the model\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n # summarize the model\n print(model.summary())\n return model\n\nmodel = getKraftEmbeddingModel()", "97804\nbefore flatten (None, 1000, 100)\nafter flatten (None, 100000)\ndense shape (None, 1)\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_4 (Embedding) (None, 1000, 100) 9780500 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 100000) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 100001 \n=================================================================\nTotal params: 9,880,501\nTrainable params: 9,880,501\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ], [ "from keras.initializers import glorot_uniform # Or your initializer of choice\nfrom tqdm import tqdm\n\ndef reset_weights(model):\n session = K.get_session()\n for layer in model.layers: \n if hasattr(layer, 'kernel_initializer'):\n layer.kernel.initializer.run(session=session)\n\ndef getEmbeddingModel():\n # define model\n model = Sequential()\n e = Embedding(300, EMBEDDING_DIM, input_length=300, name='embedding_layer', trainable=True)\n model.add(e)\n model.add(Flatten())\n model.add(Dense(1, activation='sigmoid', name='pred'))\n # compile the model\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n return model\n\n# print(embedding_matrix.shape)\n# print(vocab_size)", "_____no_output_____" ], [ "def getDataset(df):\n dataset = df[['name', 'legis_num', 'vote']]\n dataset['bill_id'] = dataset.legis_num.astype('category').cat.codes.values\n dataset['name_id'] = dataset.name.astype('category').cat.codes.values\n dataset['vote'] = dataset.vote.astype('category').cat.codes.values\n\n # dataset.drop(columns=['name', 'legis_num'], inplace=True)\n dataset = dataset.sample(frac=0.5, replace=True)\n dataset.reset_index(inplace=True)\n return dataset\n\ndef runModel(df):\n embeddinwg_learnt_all = {}\n accuracy_all = {}\n\n for name, group in df.groupby(['name_id']):\n print(name, group.iloc[0]['name'])\n\n labels = []\n padded_docs = []\n y = []\n for ind, vote in group.iterrows():\n padded_docs.append(X_emb[vote['legis_num']])\n labels.append(vote['vote'])\n\n padded_docs = np.array(padded_docs)\n labels = np.array(labels)\n\n reset_weights(model)\n # fit the model\n history = model.fit(padded_docs, labels, epochs=epochs, 
verbose=0)\n # plot_history(history)\n\n # evaluate the model\n loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)\n accuracy_all[name] = {'loss' : loss, 'accuracy' : accuracy}\n # print('Accuracy: %f' % (accuracy*100))\n\n embeddinwg_learnt_all[name] = model.get_layer(name='embedding_layer').get_weights()[0]\n \n return embeddinwg_learnt_all\n\n\n\ngrouped_congress = df_final.groupby('congress')\n\nfor name, group in grouped_congress:\n print('Processing congress', name)\n print('congress shape', group.shape)\n \n df_votes_filtered = df_final[df_final['congress'] == name]\n num_legistlators = len(df_votes_filtered['name'].unique())\n print('number of legistlators', num_legistlators)\n\n dataset = getDataset(df_votes_filtered)\n train, test = train_test_split(dataset, test_size=0.2)\n print('train', train.shape)\n print('test', test.shape)\n train.head()\n \n # Run the embedding model\n embeddinwg_learnt_all = runModel(train)\n\n break", "Processing congress 106\ncongress shape (242591, 26)\nnumber of legistlators 435\ntrain (97036, 6)\ntest (24260, 6)\n0 Abercrombie\n" ], [ "%%time\n\n\nepochs = 20\n\nmodel = getEmbeddingModel()\nreset_weights(model)\n\nembeddinwg_learnt_all = {}\naccuracy_all = {}\n\ni = 0\nfor name, group in train.groupby(['name_id']):\n print(name, group.iloc[0]['name'])\n \n labels = []\n padded_docs = []\n y = []\n for ind, vote in group.iterrows():\n# padded_docs.append(df_new[df_new['legis_num'] == vote['legis_num']].iloc[:,:-1])\n padded_docs.append(X_emb[vote['legis_num']])\n labels.append(vote['vote'])\n \n padded_docs = np.array(padded_docs)\n labels = np.array(labels)\n# print('X', padded_docs.shape)\n# print('y', labels.shape)\n# print(len(padded_docs[0]))\n\n reset_weights(model)\n# \n # summarize the model\n# print(model.summary())\n # fit the model\n history = model.fit(padded_docs, labels, epochs=epochs, verbose=0)\n# plot_history(history)\n\n \n \n # evaluate the model\n loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)\n accuracy_all[name] = {'loss' : loss, 'accuracy' : accuracy}\n# print('Accuracy: %f' % (accuracy*100))\n \n embeddinwg_learnt_all[name] = model.get_layer(name='embedding_layer').get_weights()[0]\n# print('pred', model.get_layer(name='pred').get_weights()[0].shape)\n# print('embedding_learnt.shape', embedding_learnt.shape)\n\n i+=1\n# if (5 == i):\n# break\n \nprint(embeddinwg_learnt_all[0].shape)\nnp.save('../data/embeddinwg_learnt_all.npy', embeddinwg_learnt_all)\n\ndf_performace = pd.DataFrame(accuracy_all)\nprint('average accuracy', df_performace.loc['accuracy'].mean())\nprint('average loss', df_performace.loc['loss'].mean())", "0 Abercrombie\n1 Abraham\n2 Ackerman\n3 Adams\n4 Aderholt\n5 Adler (NJ)\n6 Aguilar\n7 Akin\n8 Alexander\n9 Allen\n10 Altmire\n11 Amash\n12 Amodei\n13 Andrews\n14 Archer\n15 Arcuri\n16 Armey\n17 Arrington\n18 Ashford\n19 Austria\n20 Babin\n21 Baca\n22 Bachmann\n23 Bachus\n24 Bacon\n25 Baird\n26 Baker\n27 Baldacci\n28 Baldwin\n29 Ballance\n30 Ballenger\n31 Banks (IN)\n32 Barber\n33 Barcia\n34 Barletta\n35 Barr\n36 Barragán\n37 Barrett\n38 Barrett (NE)\n39 Barrett (SC)\n40 Barrett (WI)\n41 Barrow\n42 Barrow (GA)\n43 Bartlett\n44 Bartlett (MD)\n45 Barton\n46 Barton (TX)\n47 Bass\n48 Bass (CA)\n49 Bass (NH)\n50 Bateman\n51 Bean\n52 Beatty\n53 Beauprez\n54 Becerra\n55 Bell\n56 Benishek\n57 Bentivolio\n58 Bentsen\n59 Bera\n60 Bera (CA)\n61 Bereuter\n62 Berg\n63 Bergman\n64 Berkley\n65 Berman\n66 Berry\n67 Beutler\n68 Beyer\n69 Biggert\n70 Biggs\n71 Bilbray\n72 Bilirakis\n73 Bishop\n74 
Bishop (GA)\n75 Bishop (MI)\n76 Bishop (NY)\n77 Bishop (UT)\n78 Black\n79 Blackburn\n80 Blagojevich\n81 Bliley\n82 Blum\n83 Blumenauer\n84 Blunt\n85 Blunt Rochester\n86 Boccieri\n87 Boehlert\n88 Boehner\n89 Bonamici\n90 Bonilla\n91 Bonior\n92 Bonner\n93 Bono\n94 Bono Mack\n95 Boozman\n96 Bordallo\n97 Boren\n98 Borski\n99 Bost\n100 Boswell\n101 Boucher\n102 Boustany\n103 Boyd\n104 Boyd (FL)\n105 Boyda (KS)\n106 Boyle (PA)\n107 Boyle, Brendan F.\n108 Bradley (NH)\n109 Brady (PA)\n110 Brady (TX)\n111 Braley (IA)\n112 Brat\n113 Bridenstine\n114 Bright\n115 Brooks\n116 Brooks (AL)\n117 Brooks (IN)\n118 Broun (GA)\n119 Brown (FL)\n120 Brown (MD)\n121 Brown (OH)\n122 Brown (SC)\n123 Brown, Corrine\n124 Brown-Waite, Ginny\n125 Brownley (CA)\n126 Bryant\n127 Buchanan\n128 Buck\n129 Bucshon\n130 Budd\n131 Buerkle\n132 Burgess\n133 Burns\n134 Burr\n135 Burton\n136 Burton (IN)\n137 Bustos\n138 Butterfield\n139 Buyer\n140 Byrne\n141 Callahan\n142 Calvert\n143 Camp\n144 Camp (MI)\n145 Campbell\n146 Campbell (CA)\n147 Canady\n148 Cannon\n149 Canseco\n150 Cantor\n151 Cao\n152 Capito\n153 Capps\n154 Capuano\n155 Carbajal\n156 Cardin\n157 Cardoza\n158 Carnahan\n159 Carney\n160 Carson\n161 Carson (IN)\n162 Carson (OK)\n163 Carter\n164 Carter (GA)\n165 Carter (TX)\n166 Cartwright\n167 Case\n168 Cassidy\n169 Castle\n170 Castor\n171 Castor (FL)\n172 Castro (TX)\n173 Cazayoux\n174 Chabot\n175 Chaffetz\n176 Chambliss\n177 Chandler\n178 Cheney\n179 Chenoweth-Hage\n180 Childers\n181 Chocola\n182 Christensen\n183 Chu\n184 Chu (CA)\n185 Chu, Judy\n186 Cicilline\n187 Clark (MA)\n188 Clarke\n189 Clarke (MI)\n190 Clarke (NY)\n191 Clawson (FL)\n192 Clay\n193 Clayton\n194 Cleaver\n195 Clement\n196 Clyburn\n197 Coble\n198 Coburn\n199 Coffman\n200 Coffman (CO)\n201 Cohen\n202 Cole\n203 Cole (OK)\n204 Collins\n205 Collins (GA)\n206 Collins (NY)\n207 Combest\n208 Comer\n209 Comstock\n210 Conaway\n211 Condit\n212 Connolly\n213 Connolly (VA)\n214 Conyers\n215 Cook\n216 Cooksey\n217 Cooper\n218 Correa\n219 Costa\n220 Costello\n221 Costello (PA)\n222 Cotton\n223 Courtney\n224 Cox\n225 Coyne\n226 Cramer\n227 Crane\n228 Cravaack\n229 Crawford\n230 Crenshaw\n231 Crist\n232 Critz\n233 Crowley\n234 Cubin\n235 Cuellar\n236 Culberson\n237 Cummings\n238 Cunningham\n239 Curbelo (FL)\n240 Curson (MI)\n241 Curtis\n242 Cárdenas\n243 Dahlkemper\n244 Daines\n245 Danner\n246 Davidson\n247 Davis (AL)\n248 Davis (CA)\n249 Davis (FL)\n250 Davis (IL)\n251 Davis (KY)\n252 Davis (TN)\n253 Davis (VA)\n254 Davis, Danny\n255 Davis, David\n256 Davis, Jo Ann\n257 Davis, Lincoln\n258 Davis, Rodney\n259 Davis, Thomas M.\n260 Davis, Tom\n261 DeFazio\n262 DeGette\n263 DeLauro\n264 DeLay\n265 DeMint\n266 DeSantis\n267 DeSaulnier\n268 Deal\n269 Deal (GA)\n270 DelBene\n271 Delahunt\n272 Delaney\n273 Demings\n274 Denham\n275 Dent\n276 DesJarlais\n277 Deutch\n278 Deutsch\n279 Diaz-Balart\n280 Diaz-Balart, L.\n281 Diaz-Balart, M.\n282 Dickey\n283 Dicks\n284 Dingell\n285 Dixon\n286 Djou\n287 Doggett\n288 Dold\n289 Donnelly\n290 Donnelly (IN)\n291 Donovan\n292 Dooley\n293 Dooley (CA)\n294 Doolittle\n295 Doyle\n296 Doyle (PA)\n297 Doyle, Michael F.\n298 Drake\n299 Dreier\n300 Driehaus\n301 Duckworth\n302 Duffy\n303 Duncan\n304 Duncan (SC)\n305 Duncan (TN)\n306 Dunn\n307 Edwards\n308 Edwards (MD)\n309 Edwards (TX)\n310 Ehlers\n311 Ehrlich\n312 Ellison\n313 Ellmers\n314 Ellmers (NC)\n315 Ellsworth\n316 Emanuel\n317 Emerson\n318 Emmer\n319 Emmer (MN)\n320 Engel\n321 English\n322 English (PA)\n323 Enyart\n324 Eshoo\n325 Espaillat\n326 Estes (KS)\n327 Esty\n328 Esty 
(CT)\n329 Etheridge\n330 Evans\n331 Everett\n332 Ewing\n333 Faleomavaega\n334 Fallin\n335 Farenthold\n336 Farr\n337 Faso\n338 Fattah\n339 Feeney\n340 Ferguson\n341 Filner\n342 Fincher\n343 Fitzpatrick\n344 Fitzpatrick (PA)\n345 Flake\n346 Fleischmann\n347 Fleming\n348 Fletcher\n349 Flores\n350 Foley\n351 Forbes\n352 Ford\n353 Fortenberry\n354 Fortuño\n355 Fossella\n356 Foster\n357 Fowler\n358 Foxx\n359 Frank\n360 Frank (MA)\n361 Frankel (FL)\n362 Franks (AZ)\n363 Franks (NJ)\n364 Frelinghuysen\n365 Frost\n366 Fudge\n367 Gabbard\n368 Gaetz\n369 Gallagher\n370 Gallegly\n371 Gallego\n372 Ganske\n373 Garamendi\n374 Garcia\n375 Gardner\n376 Garrett\n377 Garrett (NJ)\n378 Gejdenson\n379 Gekas\n380 Gephardt\n381 Gerlach\n382 Gianforte\n383 Gibbons\n384 Gibbs\n385 Gibson\n386 Giffords\n387 Gilchrest\n388 Gillibrand\n389 Gillmor\n390 Gilman\n391 Gingrey\n392 Gingrey (GA)\n393 Gohmert\n394 Gomez\n395 Gonzalez\n396 Gonzalez (TX)\n397 Goode\n398 Goodlatte\n399 Goodling\n400 Gordon\n401 Gordon (TN)\n402 Gosar\n403 Goss\n404 Gottheimer\n405 Gowdy\n406 Graham\n407 Granger\n408 Graves\n409 Graves (GA)\n410 Graves (LA)\n411 Graves (MO)\n412 Grayson\n413 Green (TX)\n414 Green (WI)\n415 Green, Al\n416 Green, Gene\n417 Greenwood\n418 Griffin (AR)\n419 Griffith\n420 Griffith (VA)\n421 Grijalva\n422 Grimm\n423 Grothman\n424 Grucci\n425 Guinta\n426 Guthrie\n427 Gutierrez\n428 Gutiérrez\n429 Gutknecht\n430 Hahn\n431 Hall\n432 Hall (NY)\n433 Hall (OH)\n434 Hall (TX)\n435 Halvorson\n436 Hanabusa\n437 Handel\n438 Hanna\n439 Hansen\n440 Hardy\n441 Hare\n442 Harman\n443 Harper\n444 Harris\n445 Hart\n446 Hartzler\n447 Hastert\n448 Hastings\n449 Hastings (FL)\n450 Hastings (WA)\n451 Hayes\n452 Hayworth\n453 Heck\n454 Heck (NV)\n455 Heck (WA)\n456 Hefley\n457 Heinrich\n458 Heller\n459 Hensarling\n460 Herger\n461 Herrera Beutler\n462 Herseth\n463 Herseth Sandlin\n464 Hice (GA)\n465 Hice, Jody B.\n466 Higgins\n467 Higgins (LA)\n468 Higgins (NY)\n469 Hill\n470 Hill (IN)\n471 Hill (MT)\n472 Hilleary\n473 Hilliard\n474 Himes\n475 Hinchey\n476 Hinojosa\n477 Hirono\n478 Hobson\n479 Hochul\n480 Hodes\n481 Hoeffel\n482 Hoekstra\n483 Holden\n484 Holding\n485 Hollingsworth\n486 Holt\n487 Honda\n488 Hooley\n489 Hooley (OR)\n490 Horn\n491 Horsford\n492 Hostettler\n493 Houghton\n494 Hoyer\n495 Hudson\n496 Huelskamp\n497 Huffman\n498 Huizenga\n499 Huizenga (MI)\n500 Hulshof\n501 Hultgren\n502 Hunter\n503 Hurd\n504 Hurd (TX)\n505 Hurt\n506 Hurt (VA)\n507 Hutchinson\n508 Hyde\n509 Inglis\n510 Inglis (SC)\n511 Inslee\n512 Isakson\n513 Israel\n514 Issa\n515 Istook\n516 Jackson (IL)\n517 Jackson Lee\n518 Jackson Lee (TX)\n519 Jackson-Lee (TX)\n520 Janklow\n521 Jayapal\n522 Jefferson\n523 Jeffries\n524 Jenkins\n525 Jenkins (KS)\n526 Jenkins (WV)\n527 Jindal\n528 John\n529 Johnson (CT)\n530 Johnson (GA)\n531 Johnson (IL)\n532 Johnson (LA)\n533 Johnson (OH)\n534 Johnson, E. 
B.\n535 Johnson, Sam\n536 Jolly\n537 Jones\n538 Jones (NC)\n539 Jones (OH)\n540 Jordan\n541 Jordan (OH)\n542 Joyce\n543 Joyce (OH)\n544 Kagen\n545 Kanjorski\n546 Kaptur\n547 Kasich\n548 Katko\n549 Keating\n550 Keller\n551 Kelly\n552 Kelly (IL)\n553 Kelly (MS)\n554 Kelly (PA)\n555 Kennedy\n556 Kennedy (MN)\n557 Kennedy (RI)\n558 Kerns\n559 Khanna\n560 Kihuen\n561 Kildee\n562 Kilmer\n563 Kilpatrick\n564 Kilpatrick (MI)\n565 Kilroy\n566 Kind\n567 Kind (WI)\n568 King (IA)\n569 King (NY)\n570 Kingston\n571 Kinzinger\n572 Kinzinger (IL)\n573 Kirk\n574 Kirkpatrick\n575 Kirkpatrick (AZ)\n576 Kissell\n577 Kleczka\n578 Klein (FL)\n579 Kline\n580 Kline (MN)\n581 Klink\n582 Knight\n583 Knollenberg\n584 Kolbe\n585 Kosmas\n586 Kratovil\n587 Krishnamoorthi\n588 Kucinich\n589 Kuhl (NY)\n590 Kuster\n591 Kuster (NH)\n592 Kustoff (TN)\n593 Kuykendall\n594 LaFalce\n595 LaHood\n596 LaMalfa\n597 LaTourette\n598 Labrador\n599 Lamborn\n600 Lampson\n601 Lance\n602 Landry\n603 Langevin\n604 Lankford\n605 Lantos\n606 Largent\n607 Larsen (WA)\n608 Larson\n609 Larson (CT)\n610 Latham\n611 Latta\n612 Lawrence\n613 Lawson (FL)\n614 Lazio\n615 Leach\n616 Lee\n617 Lee (CA)\n618 Lee (NY)\n619 Levin\n620 Lewis\n621 Lewis (CA)\n622 Lewis (GA)\n623 Lewis (KY)\n624 Lewis (MN)\n625 Lieu (CA)\n626 Lieu, Ted\n627 Linder\n628 Lipinski\n629 LoBiondo\n630 Loebsack\n631 Lofgren\n632 Lofgren, Zoe\n633 Long\n634 Loudermilk\n635 Love\n636 Lowenthal\n637 Lowey\n638 Lucas\n639 Lucas (KY)\n640 Lucas (OK)\n641 Luetkemeyer\n642 Lujan Grisham (NM)\n643 Lujan Grisham, M.\n" ], [ "embeddinwg_learnt_all = np.load('../data/embeddinwg_learnt_all.npy')\nembeddinwg_learnt_all.item()[0].shape", "_____no_output_____" ], [ "len(embeddinwg_learnt_all.item())", "_____no_output_____" ], [ "# 3 inputs\n# bill emb (4062, 50) <- CF\n# legistlator emb (1118, 50) <- CF \n# legistlator_bill emb (1118, 300, 100) <- embedding\n", "_____no_output_____" ] ], [ [ "# Matrix Factorisation \n\n", "_____no_output_____" ] ], [ [ "def get_matrix_factorisation():\n movie_input = keras.layers.Input(shape=[1],name='Item')\n movie_embedding = keras.layers.Embedding(n_bill + 1, n_latent_factors, name='Movie-Embedding')(movie_input)\n movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n\n user_input = keras.layers.Input(shape=[1],name='User')\n user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, \n n_latent_factors,\n name='User-Embedding')(user_input))\n\n prod = keras.layers.dot([movie_vec, user_vec], axes=1, name='DotProduct')\n model = keras.Model([user_input, movie_input], prod)\n model.compile('adam', 'mean_squared_error')\n # SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))\n return model", "_____no_output_____" ], [ "epochs=5\n\nmodel = get_matrix_factorisation()\nmodel.summary()\n\nhistory = model.fit([train.name_id, train.bill_id], \n train.vote, epochs=epochs, verbose=1)\n\nplot_history(history)", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nItem (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nUser (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nMovie-Embedding (Embedding) (None, 1, 50) 
203100 Item[0][0] \n__________________________________________________________________________________________________\nUser-Embedding (Embedding) (None, 1, 50) 55900 User[0][0] \n__________________________________________________________________________________________________\nFlattenMovies (Flatten) (None, 50) 0 Movie-Embedding[0][0] \n__________________________________________________________________________________________________\nFlattenUsers (Flatten) (None, 50) 0 User-Embedding[0][0] \n__________________________________________________________________________________________________\nDotProduct (Dot) (None, 1) 0 FlattenMovies[0][0] \n FlattenUsers[0][0] \n==================================================================================================\nTotal params: 259,000\nTrainable params: 259,000\nNon-trainable params: 0\n__________________________________________________________________________________________________\nEpoch 1/5\n2041478/2041478 [==============================] - 111s 55us/step - loss: 0.1727\nEpoch 2/5\n2041478/2041478 [==============================] - 110s 54us/step - loss: 0.1484\nEpoch 3/5\n2041478/2041478 [==============================] - 113s 55us/step - loss: 0.1464\nEpoch 4/5\n2041478/2041478 [==============================] - 112s 55us/step - loss: 0.1455\nEpoch 5/5\n2041478/2041478 [==============================] - 111s 54us/step - loss: 0.1450\n loss\ncount 5.000000\nmean 0.151598\nstd 0.011874\nmin 0.144950\n25% 0.145496\n50% 0.146418\n75% 0.148418\nmax 0.172708\n" ], [ "y_hat = np.round(model.predict([test.name_id, test.bill_id]),0)\ny_true = test.vote\n\nfrom sklearn.metrics import mean_absolute_error\nmean_absolute_error(y_true, y_hat)", "_____no_output_____" ], [ "movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0]\nuser_embedding_learnt = model.get_layer(name='User-Embedding').get_weights()[0]\nprint('movie_embedding_learnt.shape', movie_embedding_learnt.shape)\nprint('user_embedding_learnt.shape', user_embedding_learnt.shape)\npd.DataFrame(movie_embedding_learnt).describe()", "movie_embedding_learnt.shape (4062, 50)\nuser_embedding_learnt.shape (1118, 50)\n" ] ], [ [ "# SHARED MODEL", "_____no_output_____" ], [ "# Non-negative Matrix factorisation (NNMF) in Keras\n", "_____no_output_____" ] ], [ [ "from keras.constraints import non_neg\n\ndef get_NNMF():\n movie_input = keras.layers.Input(shape=[1],name='Item')\n movie_embedding = keras.layers.Embedding(n_bill + 1, n_latent_factors, \n name='NonNegMovie-Embedding', embeddings_constraint=non_neg())(movie_input)\n movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n\n user_input = keras.layers.Input(shape=[1],name='User')\n user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,\n name='NonNegUser-Embedding',embeddings_constraint=non_neg())(user_input))\n\n prod = keras.layers.dot([movie_vec, user_vec], axes=1,name='DotProduct')\n model = keras.Model([user_input, movie_input], prod)\n model.compile('adam', 'mean_squared_error')\n return model\n\nmodel = get_NNMF()\nprint(model.summary())\n\nhistory_nonneg = model.fit([train.name_id, train.bill_id], \n train.vote, epochs=epochs, verbose=1)\n\nplot_history(history_nonneg)\n\nmovie_embedding_learnt = model.get_layer(name='NonNegMovie-Embedding').get_weights()[0]\nprint(movie_embedding_learnt.shape)\n# pd.DataFrame(movie_embedding_learnt).describe()", "_____no_output_____" ] ], [ [ "# Neural networks for recommendation", "_____no_output_____" 
] ], [ [ "n_latent_factors_user = 5\nn_latent_factors_movie = 50\n\ndef get_nueral_net():\n movie_input = keras.layers.Input(shape=[1],name='Item')\n movie_embedding = keras.layers.Embedding(n_bill + 1, n_latent_factors_movie, name='Movie-Embedding')(movie_input)\n movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n movie_vec = keras.layers.Dropout(0.2)(movie_vec)\n\n\n user_input = keras.layers.Input(shape=[1],name='User')\n user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors_user,name='User-Embedding')(user_input))\n user_vec = keras.layers.Dropout(0.2)(user_vec)\n\n\n concat = keras.layers.concatenate([movie_vec, user_vec], name='Concat')\n concat_dropout = keras.layers.Dropout(0.2)(concat)\n dense = keras.layers.Dense(200,name='FullyConnected')(concat)\n dropout_1 = keras.layers.Dropout(0.2,name='Dropout')(dense)\n dense_2 = keras.layers.Dense(100,name='FullyConnected-1')(concat)\n dropout_2 = keras.layers.Dropout(0.2,name='Dropout')(dense_2)\n dense_3 = keras.layers.Dense(50,name='FullyConnected-2')(dense_2)\n dropout_3 = keras.layers.Dropout(0.2,name='Dropout')(dense_3)\n dense_4 = keras.layers.Dense(20,name='FullyConnected-3', activation='relu')(dense_3)\n\n\n result = keras.layers.Dense(1, activation='relu',name='Activation')(dense_4)\n adam = Adam(lr=0.005)\n model = keras.Model([user_input, movie_input], result)\n model.compile(optimizer=adam,loss= 'mean_absolute_error')\n# SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))\n return model", "_____no_output_____" ], [ "\nmodel = get_nueral_net()\nmodel.summary()\nhistory = model.fit([train.name_id, train.bill_id], train.vote, \n epochs=epochs, verbose=1)\nplot_history(history)", "_____no_output_____" ], [ "y_hat_2 = np.round(model.predict([test.name_id, test.bill_id]),0)\nprint(mean_absolute_error(y_true, y_hat_2))\n\nprint(mean_absolute_error(y_true, model.predict([test.name_id, test.bill_id])))", "_____no_output_____" ], [ "movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0]\nprint(movie_embedding_learnt.shape)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d05a1dafc31057b0c38a00c4ca5f36d75b1bebe2
138,988
ipynb
Jupyter Notebook
auth.ipynb
IHIMEKPEN/auth-webscraping
92278fdcceed37217d178e2405edde0893ffa266
[ "MIT" ]
null
null
null
auth.ipynb
IHIMEKPEN/auth-webscraping
92278fdcceed37217d178e2405edde0893ffa266
[ "MIT" ]
null
null
null
auth.ipynb
IHIMEKPEN/auth-webscraping
92278fdcceed37217d178e2405edde0893ffa266
[ "MIT" ]
null
null
null
39.384528
368
0.423972
[ [ [ "import requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport datetime\nfrom pprint import pprint\n\n", "_____no_output_____" ], [ "import requests\n\ncookies = {\n 'PHPSESSID': 'ojkufbntbbvn59v1qae1f6vnt7',\n}\n\nheaders = {\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-User': '?1',\n 'Sec-Fetch-Dest': 'document',\n 'Referer': 'https://eportal.oauife.edu.ng/result_check1.php',\n 'Accept-Language': 'en-US,en;q=0.9',\n}\n\nresponse = requests.get('https://eportal.oauife.edu.ng/displayresult.php', headers=headers, cookies=cookies)", "_____no_output_____" ], [ "\n#html parsing\nsoup=bs(response.content,\"html.parser\")", "_____no_output_____" ], [ "print(soup)", "<!DOCTYPE html>\n\n<html lang=\"en\">\n<head>\n<title>Obafemi Awolowo University - Result Check</title>\n<meta charset=\"utf-8\"/>\n<link href=\"images/favicon.ico\" rel=\"icon\"/>\n<link href=\"images/favicon.ico\" rel=\"shortcut icon\">\n<link href=\"css/unoslider/unoSlider.css\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\"/>\n<link href=\"css/style.css\" rel=\"stylesheet\"/>\n<link href=\"css/slider.css\" rel=\"stylesheet\"/>\n<link href=\"css/form.css\" rel=\"stylesheet\"/>\n<link href=\"css/notification.css\" rel=\"stylesheet\"/>\n<link href=\"css/jquery-ui/jquery-ui.css\" rel=\"stylesheet\"/>\n<script src=\"js/jquery/jquery-1.10.2.js\"></script>\n<script src=\"js/jquery/jquery-ui.js\"></script>\n<!-- <script src=\"js/jquery.js\"></script> -->\n<script src=\"js/jquery-migrate-1.1.1.js\"></script>\n<script src=\"js/superfish.js\"></script>\n<script src=\"js/sForm.js\"></script>\n<script src=\"js/jquery.equalheights.js\"></script>\n<script src=\"js/jquery.easing.1.3.js\"></script>\n<script src=\"js/tms-0.4.1.js\"></script>\n<script src=\"js/jquery.carouFredSel-6.1.0-packed.js\"></script>\n<script src=\"js/jquery.touchSwipe.min.js\"></script>\n<script src=\"js/unoslider/unoSlider.min.js\"></script>\n<!-- <script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.8.0/jquery.min.js\"></script>-->\n<link href=\"/css/dropit/dropit.css\" rel=\"stylesheet\" type=\"text/css\">\n<link href=\"css/bootstrap.min.css\" rel=\"stylesheet\"/>\n<link href=\"css/passwordcheck.css\" rel=\"stylesheet\"/>\n<script>\n\t\t\t$(window).load(function(){\n\t\t\t$('.slider')._TMS({\n\t\t\t\tshow:0,\n\t\t\t\tpauseOnHover:true,\n\t\t\t\tprevBu:'.prev',\n\t\t\t\tnextBu:'.next',\n\t\t\t\tplayBu:false,\n\t\t\t\tduration:800,\n\t\t\t\tpreset:'fade',\n\t\t\t\tpagination:false,//'.pagination',true,'<ul></ul>'\n\t\t\t\tpagNums:false,\n\t\t\t\tslideshow:8000,\n\t\t\t\tnumStatus:false,\n\t\t\t\tbanners:true,\n\t\t\t\twaitBannerAnimation:false,\n\t\t\t\tprogressBar:false\n\t\t\t})\n\t\t\t});\n\t\t\t$(window).load (\n\t\t\tfunction(){$('.carousel1').carouFredSel({auto: false,prev: '.prev1',next: '.next1', width: 1030, items: {\n\t\t\t\tvisible : {min: 1,\n\t\t\t\tmax: 4\n\t\t\t\t},\n\t\t\t\theight: 'auto',\n\t\t\t\twidth: 157,\n\t\t\t}, responsive: true,\n\t\t\tscroll: 1,\n\t\t\tmousewheel: false,\n\t\t\tswipe: {onMouse: true, onTouch: true}});\n\t\t\t});\n\t\t</script>\n<script>\n\t\t $(function() 
{\n\t\t $( \"#menu\" ).menu();\n\t\t });\n\t\t </script>\n<style>\n\t\t .ui-menu { width: 250px; margin-left: 10px; }\n\t\t </style>\n<script>\n</script>\n<style>\n\t\t label {\n\t\t display: inline-block;\n\t\t /*width: 5em;*/\n\t\t }\n\t\t </style>\n<script>\n $(function() {\n $( \"#dialog\" ).dialog();\n });\n </script>\n<script type=\"text/javascript\">\n\t$(document).ready(function() {\n\t\t// My Uno Slider\n\t\twindow.unoSlider = $('#sliderId').unoSlider();\n\t});\n</script>\n<style>\n</style>\n<link href=\"//cdn.datatables.net/1.10.9/css/jquery.dataTables.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script charset=\"utf8\" src=\"//cdn.datatables.net/1.10.9/js/jquery.dataTables.js\" type=\"text/javascript\"></script>\n<script src=\"js/bootstrap.min.js\" type=\"text/javascript\"></script>\n<script src=\"js/jquery.dform-1.1.0.js\" type=\"text/javascript\"></script>\n<script src=\"js/jquery.validate.min.js\" type=\"text/javascript\"></script>\n<script src=\"js/passwordcheck.js\"></script>\n</link></link></head>\n<body class=\"page1\">\n<!--==============================header=================================-->\n<header>\n<div class=\"container_12\">\n<div class=\"grid_12\">\n<div class=\"h_organization\">Obafemi Awolowo University</div>\n<div class=\"h_portal\">Student Information Portal</div>\n<h1><a href=\"home.php\"><img alt=\"Obafemi Awolowo University ePortal\" height=\"90\" src=\"images/oaulogo.png\"/></a></h1>\n<div class=\"clear\"></div>\n</div>\n<div class=\"clear\"></div>\n</div>\n<div class=\"menu_block\">\n<div class=\"container_12\">\n<div class=\"grid_12 menu\">\n<!--<div class=\"socials\"><a href=\"#\"></a><a href=\"#\"></a></div>\n\t\t\t\t\t\t<div class=\"autor\">\n\t\t\t\t\t\t\t<a href=\"#\">User Login</a>\n\t\t\t\t\t\t\tSocial\n\t\t\t\t\t\t</div>-->\n<nav class=\"\">\n<ul class=\"sf-menu\">\n<li class=\"current\"><a href=\"home.php\">Home</a></li>\n<li class=\"with_ul\"><a href=\"#\">Students</a>\n<ul>\n<li><a href=\"undergraduatetasks.php\">Undergraduate</a></li>\n<li><a href=\"login.php\">Postgraduate</a></li>\n<li><a href=\"mbatasks.php\">Executive MBA</a></li>\n</ul>\n</li>\n<li><a href=\"#\">Staff</a>\n<ul>\n<li><a href=\"login.php\"> Staff Login</a>\n<!-- <ul>\n\t\t\t\t\t\t\t\t\t\t\t\t<li><a href=\"#\">Seeds</a></li>\n\t\t\t\t\t\t\t\t\t\t\t\t<li><a href=\"#\">Traits</a></li>\n\t\t\t\t\t\t\t\t\t\t\t\t<li><a href=\"#\">Safety Control</a></li>\n\t\t\t\t\t\t\t\t\t\t\t</ul>-->\n</li>\n<li><a href=\"paymenttasks.php\">Bursary</a></li>\n<!-- <li><a href=\"#\">FAQS</a></li>-->\n</ul>\n</li>\n<li><a href=\"faqs.php\">FAQs</a></li>\n<li><a href=\"contact.php\">Contact Us</a></li>\n<!-- <li><a href=\"index-5.html\">Contacts</a></li>-->\n</ul>\n</nav>\n<div class=\"clear\"></div>\n</div>\n<div style=\"float: right;padding-top: 20px;font-size: 12px;color: gold;padding-right: 40px;\"> \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tWelcome <a href=\"undergraduatetasks.php\">IHIMEKPEN OSEMUDIAMEN ANDREW</a>, <a href=\"signout.php\">Sign Out</a>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</header>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. 
Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for HARMATTAN SEMESTER 2015/2016 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>CHM101</td>\n<td>Introductory Chemistry I</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t55C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>CHM103</td>\n<td>Experimental Chemistry I</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t75A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>MTH101</td>\n<td>Elementary Mathematics I</td>\n<td>5</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t66B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>PHY101</td>\n<td>General Physics I</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t42E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>PHY107</td>\n<td>Experimental Physics IA</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t64B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>SEO001</td>\n<td>Fundamentals of Human Behaviour</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t60B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>SEP001</td>\n<td>Drugs and Society I</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t53C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>TPD101</td>\n<td>Engineers in Society</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t50C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" 
style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>0</th>\n<th>0</th>\n<th>0</th>\n<th>16</th>\n<th>48</th>\n<th>3</th>\n<th>16</th>\n<th>48</th>\n<th>3</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. 
Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for RAIN SEMESTER 2015/2016 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>CHM102</td>\n<td>Introductory Chemistry II</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t42E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>CHM104</td>\n<td>Experimental Chemistry II</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t66B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>MTH102</td>\n<td>Elementary Mathematics II</td>\n<td>5</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t73A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>MTH104</td>\n<td>Vectors</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t61B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>PHY102</td>\n<td>General Physics II</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t50C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>PHY108</td>\n<td>Experimental Physics 1B</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t64B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>SER001</td>\n<td>Use of English</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t45D\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>16</th>\n<th>48</th>\n<th>3</th>\n<th>17</th>\n<th>57</th>\n<th>3.35</th>\n<th>33</th>\n<th>105</th>\n<th>3.18</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" 
style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for HARMATTAN SEMESTER 2016/2017 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>CHE201</td>\n<td>Engineering Thermodynamics</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t80A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>CSC201</td>\n<td>Computer Programming</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t57C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE203</td>\n<td>Fundamentals of Electronic/Electrical Eng 1</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t74A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE281</td>\n<td>Fundamentals of Electronic/Electrical Eng 1 Lab</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t42E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>MEE203</td>\n<td>Engineering Drawing I</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t54C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>MEE205</td>\n<td>Engineering Mechanics I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t55C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>MSE201</td>\n<td>Elements of Engineering 
Materials</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t65B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>MTH201</td>\n<td>Mathematical Methods I</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t62B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>SEE001</td>\n<td>Indigenous Education in Nigeria</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t40E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>33</th>\n<th>105</th>\n<th>3.18</th>\n<th>22</th>\n<th>83</th>\n<th>3.77</th>\n<th>55</th>\n<th>188</th>\n<th>3.42</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. 
Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for RAIN SEMESTER 2016/2017 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>AGE202</td>\n<td>Workshop Practice</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t56C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE204</td>\n<td>Fundamentals of Electronic/Electrical Eng 2</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t75A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE206</td>\n<td>Introduction to Computer Packages</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t72A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE282</td>\n<td>Fundamentals of Electronic/Electrical Eng 2 Lab</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t60B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>MEE206</td>\n<td>Engineering Mechanics II</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t52C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>MTH202</td>\n<td>Mathematical Methods II</td>\n<td>4</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t60B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>SEA002</td>\n<td>Elements of Business Administration</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t50C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>SEM002</td>\n<td>Issues in Land Use and Management</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t58C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>SEO004</td>\n<td>Business Environment &amp; Approaches to Bus. 
start-up</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t45D\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>55</th>\n<th>188</th>\n<th>3.42</th>\n<th>16</th>\n<th>65</th>\n<th>4.06</th>\n<th>71</th>\n<th>253</th>\n<th>3.56</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. 
Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for HARMATTAN SEMESTER 2017/2018 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>CHE305</td>\n<td>Engineering Analysis I </td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t73A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE301</td>\n<td>Microelectronic Devices and Circuits I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t65B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE303</td>\n<td>Electromechanical Devices</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t45D\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE305</td>\n<td>Computational Structures I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t67B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE307</td>\n<td>Group Design I</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t85A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE309</td>\n<td>Signals and Systems</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t70A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE311</td>\n<td>Electromagnetic Theory</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t79A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE391</td>\n<td>Electrotechnics Laboratory I</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t54C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>71</th>\n<th>253</th>\n<th>3.56</th>\n<th>21</th>\n<th>86</th>\n<th>4.1</th>\n<th>92</th>\n<th>339</th>\n<th>3.69</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP 
STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for RAIN SEMESTER 2017/2018 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>AGE302</td>\n<td>Statistics for Engineers</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t60B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>CHE306</td>\n<td>Engineering Analysis II</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t60B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE302</td>\n<td>Microelectronic Devices and Circuits I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t57C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE304</td>\n<td>Electrical Machines</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t52C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE306</td>\n<td>Computational Structures II</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t61B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE308</td>\n<td>Digital Circuit Analysis and 
Design</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t63B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE310</td>\n<td>Measurement and Instrumentation I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t70A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE392</td>\n<td>Electrotechnics Laboratory II</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t52C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>92</th>\n<th>339</th>\n<th>3.69</th>\n<th>22</th>\n<th>83</th>\n<th>3.77</th>\n<th>114</th>\n<th>422</th>\n<th>3.7</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Repeat</td>\n<td>Courses Still Outstanding</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. 
Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for HARMATTAN SEMESTER 2018/2019 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>CVE401</td>\n<td>Technical Report Writing</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t61B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE401</td>\n<td>Group Design II</td>\n<td>1</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t74A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE403</td>\n<td>Electric Power Principles</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t43E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE405</td>\n<td>Analogue Circuit Design</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t52C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE407</td>\n<td>Introduction to Control Engineering</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t70A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE409</td>\n<td>Communication Principles</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t73A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE411</td>\n<td>Applied Quantum Mechanics</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t58C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE491</td>\n<td>Electronic &amp; Electrical Engineering Laboratory</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t57C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>TPD501</td>\n<td>Industrial Economics</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t43E\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" 
style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>114</th>\n<th>422</th>\n<th>3.7</th>\n<th>21</th>\n<th>69</th>\n<th>3.29</th>\n<th>135</th>\n<th>491</th>\n<th>3.64</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. 
Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for RAIN SEMESTER 2018/2019 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>EEE200</td>\n<td>Students Work Experience</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t72A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE300</td>\n<td>Students Industrial Work Experience Scheme I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t77A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE400</td>\n<td>Students Industrial Work Experience Scheme II</td>\n<td>9</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t68B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<th>135</th>\n<th>491</th>\n<th>3.64</th>\n<th>15</th>\n<th>66</th>\n<th>4.4</th>\n<th>150</th>\n<th>557</th>\n<th>3.71</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Pass</td>\n<td>Pass</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--=======content================================-->\n<div class=\"content\">\n<div class=\"black\">\n<div class=\"container_12 classify\">\n<div class=\"grid_12 profile courseform\">\n<div style=\"display: block; margin:10px 0px; padding:0;\">\n<div class=\"info\" 
style=\"\">\n<h4>Disclaimer</h4>\n<p>\n\t\t\t\t\t\t\t\t\t\tThe results given below is only provisional and is subject to confirmation by senate. Accordingly, the Directorate accepts no responsibility for errors or omissions caused as a result of transmission via the Internet or downloading or printing by the user, nor for changes or variation by the University before they are finally released.\n\t\t\t\t\t\t\t\t\t</p>\n</div>\n</div>\n<div class=\"col1\" style=\"margin-top: 25px;\">\n<h4>EEG/2015/057 - IHIMEKPEN Osemudiamen Andrew - B.Sc. Electronic and Electrical Engineering</h4>\n<h4>Statement of Results for HARMATTAN SEMESTER 2019/2020 SESSION</h4>\n<div>\n<p class=\"heading\">A) SEMESTER OFFERED COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Score</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>EEE501</td>\n<td>Final Year Project I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tAR\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE503</td>\n<td>Control Systems Engineering I</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t51C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE505</td>\n<td>Probability and Stochastic Processes</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t64B\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE507</td>\n<td>Measurement and Instrumentation II</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t57C\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>EEE521</td>\n<td>Introduction to Modern Control</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t74A\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"odd body\">\n<td>EEE523</td>\n<td>Instrumentation Engineering</td>\n<td>3</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t45D\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n<tr class=\"even body\">\n<td>TPD503</td>\n<td>Industrial Law and Management</td>\n<td>2</td>\n<td>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t47D\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">B) CUMULATIVE OUTSTANDING COURSES</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:15%\">Course Code</th>\n<th class=\"left\" style=\"width:60%\">Course Title</th>\n<th class=\"left\" style=\"width:15%\">Course Unit</th>\n<th class=\"left\" style=\"width:10%\">Type</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<td>EEE501</td>\n<td>Final Year Project I</td>\n<td>3</td>\n<td>AR</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">C) RESULT SUMMARY</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Previous</th>\n<th class=\"left\" colspan=\"3\" style=\"width:33%\">Present</th>\n<th class=\"left\" 
colspan=\"3\" style=\"width:33%\">Cummulative</th>\n</tr>\n<tr>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n<th>TNU</th>\n<th>TCP</th>\n<th>GPA</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"even body\">\n<th>150</th>\n<th>557</th>\n<th>3.71</th>\n<th>17</th>\n<th>55</th>\n<th>3.24</th>\n<th>167</th>\n<th>612</th>\n<th>3.67</th>\n</tr>\n</tbody>\n</table>\n</div>\n<div>\n<p class=\"heading\">D) STUDENTSHIP STATUS IN THE SEMESTER:- <span class=\"good\">Active</span></p>\n</div>\n<div>\n<p class=\"heading\">E) REMARKS</p>\n<table class=\"profile\" style=\"width: 100%;\">\n<thead>\n<tr>\n<th class=\"left\" style=\"width:33%\">Semester</th>\n<th class=\"left\" style=\"width:33%\">Cummulative</th>\n</tr>\n</thead>\n<tbody>\n<tr class=\"odd body\">\n<td>Repeat</td>\n<td>Courses Still Outstanding</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</div>\n<div class=\"clear\"></div>\n</div>\n</div>\n</div>\n<script>\n \t\tfunction printDiv(){\n\t\t\tvar divToPrint=document.getElementById('res'); \n\t\t\tvar newWin=window.open('','Print-Window');\n\t\t\tnewWin.document.open();\n\t\t\tnewWin.document.write('<html><body onload=\"window.print()\">'+divToPrint.innerHTML+'</body></html>');\n\t\t\tnewWin.document.close();\n\t\t\tsetTimeout(function(){newWin.close();},10);\n\t\t }\n \n </script>\n<!--==============================footer=================================-->\n<script type=\"text/javascript\">\n\t\t$(document).ready(function() {\n\t\t $('.home-menu').dropit();\n\t\t});\t\t\n\t\t</script>\n<script src=\"js/passwordcheck.js\"></script>\n<footer>\n<div class=\"container_12\">\n<div class=\"grid_2\" style=\"display: none\">\n<div class=\"copy\">\n<a class=\"footer_logo\" href=\"index.html\"><img alt=\"\" src=\"images/footer_logo.png\"/></a>\t© 2013\t<a href=\"#\">Privacy Policy</a>\n</div>\n</div>\n<div class=\"grid_4\">\n<ul>\n<li><a href=\"#\">Privacy Statement</a></li>\n<li><a href=\"#\">Terms and Conditions of Use</a></li>\n<li><a href=\"#\">Legal Notice</a></li>\n<li><a href=\"faqs.php\">FAQ</a></li>\n<li><a href=\"#\">Lecture Time Table</a></li>\n</ul>\n</div>\n<div class=\"grid_4\">\n<ul>\n<li><a href=\"#\">Examination Time Table</a></li>\n<li><a href=\"contact.php\">Contact Us</a></li>\n<li><a href=\"studentonlinehelp.php\">Student Help</a></li>\n<li><a href=\"staffonlinehelp.php\">Staff Help</a></li>\n</ul>\n</div>\n<div class=\"grid_4\">\n</div>\n<div class=\"grid_3 prefix_1\" style=\"display: none\">\n<h4>Newsletter</h4>\n<form id=\"newsletter\">\n<div class=\"success\">Your subscribe request has been sent!</div>\n<label class=\"email\">\n<span>Enter e-mail address</span>\n<input type=\"email\" value=\"\"/>\n<a class=\"btn\" data-type=\"submit\" href=\"#\">Subscribe</a>\n<span class=\"error\">*This is not a valid email address.</span>\n</label>\n</form>\n</div>\n<div class=\"clear\"></div>\n</div>\n<div class=\"f_bot\">\n<div class=\"container_12\">\n<div class=\"grid_12\">© 2006 - 2021 Obafemi Awolowo University</div>\n</div>\n<div class=\"container_12\">\n<div class=\"grid_12\"><img src=\"images/remita-payment-logo-vertical.png\"/></div>\n</div>\n</div>\n</footer>\n</body>\n</html>\n" ], [ "table=soup.find_all(\"table\",{\"class\":\"profile\"})", "_____no_output_____" ], [ "data=soup.find_all(\"td\")", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "datavalues=[]\n\nfor dat in data:\n \n # dat.translate(None, '\\t\\n ')\n dat.text#main\n \n # ''.join(dat.split())\n 
datavalues.append(dat.text)\n\n\n\n\ndatavalues\n", "_____no_output_____" ], [ "values=[]\nfor val in datavalues:\n val=''.join(val.split())\n # print(val)\n values.append(val)", "_____no_output_____" ], [ "values", "_____no_output_____" ], [ "# values.remove('AR')\n# values.remove('Repeat')\nfor num in values[:]: #iterate over a shallow copy\n if num == 'Pass':\n values.remove(num)\n if num == 'Repeat':\n values.remove(num)\n if num == 'CoursesStillOutstanding':\n values.remove(num)\n if num == 'AR':\n values.remove(num)\n\nvalues = values[:-1]\n\n# [values for item in values if item != \"Pass\"]\nprint(len(values))", "272\n" ], [ "\nimport numpy as np\n\nreshaped_array = np. reshape(values, (68, 4))\nreshaped_array\n\n", "_____no_output_____" ], [ "# convert numpy array to dataframe\ndf = pd.DataFrame(reshaped_array, columns =['Course Code', 'Course Title', 'Course Unit', 'Score '])\nprint(\"\\nPandas DataFrame: \")\n# df.drop[72]", "\nPandas DataFrame: \n" ], [ "# df.loc['Pass']", "_____no_output_____" ], [ "# df=df.head(71)\ndf", "_____no_output_____" ], [ "df.to_csv('myresult.csv')", "_____no_output_____" ], [ "df.pop[61]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05a1dc433900926cfb5b637761bf7e954468cc9
189,991
ipynb
Jupyter Notebook
Lecture-05-26/K-Means Clustering Exercise and Lecture.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
Lecture-05-26/K-Means Clustering Exercise and Lecture.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
Lecture-05-26/K-Means Clustering Exercise and Lecture.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
190.181181
6,468
0.85234
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "data = pd.read_csv(\"/data/digits.csv\")", "_____no_output_____" ], [ "data\n#in unsupervised learning, we won't use label column", "_____no_output_____" ], [ "#supervised learning: given training inputs X and their outputs /labels Y\n\n#think about children supervising a child's learning\n\n\n#but what if we didn't have labels?\n\n#suppose i'm in a foreign country trying to read a completely foreign language...\n\n#unsupervised learning: no one is telling us what inputs are.", "_____no_output_____" ], [ "#in 2d, no need for alg\n\n#k means algorithm:\n\n#step 0: randomly assign points to clusters\n\n#step1: compute centroid of k clusters\n\n#step 2: assign ea pt of clister of its nearest centroid\n\n#repeat step1 and step2 until it stabilizes\n\nX = data.ix[:, \"pixel0\":\"pixel783\"]", "_____no_output_____" ], [ "\n#.ix[0,: ] represents a flat row of pixels\nplt.matshow(X.ix[0, :].reshape(28, 28), cmap=\"binary\")\nplt.matshow(X.ix[1, :].reshape(28, 28), cmap=\"binary\")\nplt.matshow(X.ix[2, :].reshape(28, 28), cmap=\"binary\")\nplt.matshow(X.ix[3, :].reshape(28, 28), cmap=\"binary\")", "_____no_output_____" ], [ "from sklearn.cluster import KMeans\nmodel = KMeans(n_clusters = 10)\nmodel.fit(X)\n# model.labels_ #which cluster ea observation belongs to\n# model.cluster_centers_ # coordinates of the centroids\n\n#Inclass exercise:\n\n#apply k means clustering to group the handwritten digits into 10 clusters. does ea clsuter correspond to a digit 0-9? \n#What happens when u try to group the digits into just 5 clusters", "_____no_output_____" ], [ "for i in range(10):\n plt.matshow(model.cluster_centers_[i,:].reshape(28,28), cmap = \"binary\")\n\n", "_____no_output_____" ], [ "model.labels_", "_____no_output_____" ], [ "import numpy as np\n\nfor i in np.where(model.labels_ == 4)[0][:10]:\n plt.matshow(X.ix[i,:].reshape(28,28), cmap = \"binary\")", "_____no_output_____" ], [ "for i in range(5):\n \n plt.matshow(model.cluster_centers_[i,:].reshape(28,28), cmap = 'binary')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05a336b0df1c9fa59edfdb11453cd9c814f2927
81,537
ipynb
Jupyter Notebook
Sarcastic Chatbot - Build The Dataset.ipynb
MohamedAliHabib/Sarcastic-Chatbot
0c23e75471632f232acb26235e8013a763b66f6f
[ "Apache-2.0" ]
9
2020-04-17T03:44:18.000Z
2021-09-15T11:41:20.000Z
Sarcastic Chatbot - Build The Dataset.ipynb
MohamedAliHabib/Sarcastic-Chatbot
0c23e75471632f232acb26235e8013a763b66f6f
[ "Apache-2.0" ]
null
null
null
Sarcastic Chatbot - Build The Dataset.ipynb
MohamedAliHabib/Sarcastic-Chatbot
0c23e75471632f232acb26235e8013a763b66f6f
[ "Apache-2.0" ]
7
2020-04-17T00:27:12.000Z
2020-07-11T18:37:19.000Z
36.046419
1,547
0.462686
[ [ [ "# Building the dataset", "_____no_output_____" ], [ "In this notebook, I'm going to be working with three datasets to create the dataset that the chatbot will be trained on.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "files_path = 'D:/Sarcastic Chatbot/Input/'", "_____no_output_____" ] ], [ [ "# First dataset\n**The Wordball Joke Dataset**, [link](https://www.kaggle.com/bfinan/jokes-question-and-answer/).\n\nThis dataset consists of three files, namely:\n1. <i>qajokes1.1.2.csv</i>: with <i>75,114</i> pairs.\n2. <i>t_lightbulbs.csv</i>: with <i>2,640</i> pairs.\n3. <i>t_nosubject.csv</i>: with <i>32,120</i> pairs.\n\nHowever, I'm not going to incorporate <i>t_lightbulbs.csv</i> in my dataset because I don't want that many examples of one topic. Besides, all the examples are similar in structure (they all start with <i>how many</i>).", "_____no_output_____" ], [ "Read the data files into pandas dataframes:", "_____no_output_____" ] ], [ [ "wordball_qajokes = pd.read_csv(files_path + 'qajokes1.1.2.csv', usecols=['Question', 'Answer'])\nwordball_nosubj = pd.read_csv(files_path + 't_nosubject.csv', usecols=['Question', 'Answer'])", "_____no_output_____" ], [ "print(len(wordball_qajokes))\nprint(len(wordball_nosubj))", "75114\n32120\n" ], [ "wordball_qajokes.head()", "_____no_output_____" ], [ "wordball_nosubj.head()", "_____no_output_____" ] ], [ [ "Concatenate both dataframes into one:", "_____no_output_____" ] ], [ [ "wordball = pd.concat([wordball_qajokes, wordball_nosubj], ignore_index=True)\nwordball.head()", "_____no_output_____" ], [ "print(f\"Number of question-answer pairs in the Wordball dataset: {len(wordball)}\")", "Number of question-answer pairs in the Wordball dataset: 107234\n" ] ], [ [ "## Text Preprocessing", "_____no_output_____" ], [ "It turns out that not all cells are of type string. So, we can just apply the *str* function to make sure that all of them are of the same desired type.", "_____no_output_____" ] ], [ [ "wordball = wordball.applymap(str)", "_____no_output_____" ] ], [ [ "Let's look at the characters used in this dataset:", "_____no_output_____" ] ], [ [ "def distinct_chars(data, cols):\n \"\"\"\n This method takes in a pandas dataframe and prints all distinct characters.\n data: a pandas dataframe.\n cols: a Python list, representing names of columns for questions and answers. 
First item of the list should be the name \n of the questions column and the second item should be the name of the column corresponding to answers.\n \"\"\"\n \n if cols is None:\n cols = list(data.columns)\n \n # join all questions into one string\n questions = ' '.join(data[cols[0]])\n # join all answers into one string\n answers = ' '.join(data[cols[1]])\n \n # get distinct characters used in the data (all questions and answers)\n dis_chars = set(questions+answers)\n \n # print the distinct characters that are used in the data\n print(f\"Number of distinct characters used in the dataset: {len(dis_chars)}\")\n # print(dis_chars) \n dis_chars = list(dis_chars)\n \n # Now let's print those characters in an organized way\n digits = [char for char in dis_chars if char.isdigit()]\n alphabets = [char for char in dis_chars if char.isalpha()]\n special = [char for char in dis_chars if not (char.isdigit() | char.isalpha())]\n # sort them to make them easier to read\n digits = sorted(digits)\n alphabets = sorted(alphabets)\n special = sorted(special)\n \n print(f\"Digits: {digits}\")\n print(f\"Alphabets: {alphabets}\")\n print(f\"Special characters: {special}\")", "_____no_output_____" ], [ "distinct_chars(wordball, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 120\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nAlphabets: ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ß', 'è', 'é', 'ñ', 'ó', 'ö', 'ü']\nSpecial characters: [' ', '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|', '}', '~', '\\xa0', '¡', '¤', '«', '°', '»', '¿', '\\u200b', '–', '—', '‘', '’', '“', '”', '…', '™', '\\ufeff', '🎺']\n" ] ], [ [ "The following function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.", "_____no_output_____" ] ], [ [ "def clean_text(text):\n \"\"\"\n This method takes a string, applies different text preprocessing (characters replacement, removal of unwanted characters, \n removal of extra whitespaces) operations and returns a string.\n text: a string.\n \"\"\"\n import re\n \n text = str(text)\n \n # REPLACEMENT\n # replace \" with ' (because they basically mean the same thing)\n # text = text.replace('\\\"','\\'')\n text = re.sub('\\\"', '\\'', text)\n # replace “ and ” with '\n # text = text.replace(\"“\",'\\'').replace(\"”\",'\\'')\n text = re.sub(\"“\", '\\'', text)\n text = re.sub(\"”\", '\\'', text)\n # replace ’ with '\n # text = text.replace('’','\\'')\n text = re.sub('’', '\\'', text)\n # replace [] and {} with ()\n #text = text.replace('[','(').replace(']',')').replace('{','(').replace('}',')')\n text = re.sub('\\[','(', text)\n text = re.sub('\\]',')', text)\n text = re.sub('\\{','(', text)\n text = re.sub('\\}',')', text)\n # replace ? with itself and a whitespace preceding it\n # ex. what's your name? (we want the word name and question mark to be separate tokens)\n # text = re.sub('\\?', ' ?', text)\n # creating a space between a word and the punctuation following it\n # punctuation we're using: . , : ; ' ? ! 
+ - * / = % $ @ & ( )\n text = re.sub(\"([?.!,:;'?!+\\-*/=%$@&()])\", r\" \\1 \", text)\n \n \n # REMOVAL OF UNWANTED CHARACTERS\n # accept only alphanumeric and some special characters and remove all others\n # a-zA-Z0-9 : matches any alphanumeric character and the underscore.\n # \\. : matches .\n # \\, : matches ,\n # \\: : matches :\n # \\; : matches ;\n # \\' : matches '\n # \\? : matches ?\n # \\! : matches !\n # \\+ : matches +\n # \\- : matches -\n # \\* : matches *\n # \\/ : matches /\n # \\= : matches =\n # \\% : matches %\n # \\$ : matches $\n # \\@ : matches @\n # \\& : matches &\n # ^ is added to the beginning of the set to express that we want the regex to recognize all other characters except\n # these that are explicitly specified, so that we can omit them.\n # define the pattern\n pattern = re.compile('[^a-zA-Z0-9_\\.\\,\\:\\;\\'\\?\\!\\+\\-\\*\\/\\=\\%\\$\\@\\&\\(\\)]')\n # remove unwanted characters\n text = re.sub(pattern, ' ', text)\n \n # lower case the characters in the string\n text = text.lower()\n \n # REMOVAL OF EXTRA WHITESPACES\n # remove duplicated spaces\n text = re.sub(' +', ' ', text)\n # remove leading and trailing spaces\n text = text.strip()\n \n return text", "_____no_output_____" ] ], [ [ "Let's try it out:", "_____no_output_____" ] ], [ [ "clean_text(\"A nice quote I read today: “Everything that you are going through is preparing you for what you asked for”. @hi % & =+-*/\")", "_____no_output_____" ] ], [ [ "The following method prints a question-answer pair from the dataset, it will be helpful to give us a sense of what the *clean_text* function results in:", "_____no_output_____" ] ], [ [ "def print_question_answer(df, index, cols):\n print(f\"Question: ({index})\")\n print(df.loc[index][cols[0]])\n print(f\"Answer: ({index})\")\n print(df.loc[index][cols[1]])", "_____no_output_____" ], [ "print(\"Before applying text preprocessing:\")\nprint_question_answer(wordball, 102, ['Question', 'Answer'])\nprint_question_answer(wordball, 200, ['Question', 'Answer'])\nprint_question_answer(wordball, 88376, ['Question', 'Answer'])\nprint_question_answer(wordball, 94351, ['Question', 'Answer'])", "Before applying text preprocessing:\nQuestion: (102)\nWhat's 11 & 2?\nAnswer: (102)\nThe Cowboys\nQuestion: (200)\nWhat did the girlfriend say to her boyfriend that was bitten by a zombie?\nAnswer: (200)\nYou're dead to me\"\nQuestion: (88376)\nI think my husband is psychic! \"Honey, what do you think of this outfit\nAnswer: (88376)\n\" {from other room} \"You look great!\"\nQuestion: (94351)\n{Thomas Edison prank call} Is your refrigerator running\nAnswer: (94351)\n \"Yes..\" YOU'RE WELCOME! *click*\n" ] ], [ [ "Apply text preprocessing (characters replacement, removal of unwanted characters, removal of extra whitespaces):", "_____no_output_____" ] ], [ [ "wordball = wordball.applymap(clean_text)", "_____no_output_____" ], [ "print(\"After applying text preprocessing:\")\nprint_question_answer(wordball, 102, ['Question', 'Answer'])\nprint_question_answer(wordball, 200, ['Question', 'Answer'])\nprint_question_answer(wordball, 88376, ['Question', 'Answer'])\nprint_question_answer(wordball, 94351, ['Question', 'Answer'])", "After applying text preprocessing:\nQuestion: (102)\nwhat ' s 11 & 2 ?\nAnswer: (102)\nthe cowboys\nQuestion: (200)\nwhat did the girlfriend say to her boyfriend that was bitten by a zombie ?\nAnswer: (200)\nyou ' re dead to me '\nQuestion: (88376)\ni think my husband is psychic ! 
' honey , what do you think of this outfit\nAnswer: (88376)\n' ( from other room ) ' you look great ! '\nQuestion: (94351)\n( thomas edison prank call ) is your refrigerator running\nAnswer: (94351)\n' yes . . ' you ' re welcome ! * click *\n" ] ], [ [ "The following function applies some preprocessing operations on the data, concretely:\n1. Drops unecessary duplicate pairs (rows) but keep only one instance of all duplicates. *(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*\n2. Drops rows with empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset) *\n3. Drops rows with more than 30 words in either the question or the answer or if the answer has less than two characters. *(Note: this is a hyperparameter and you can try other values.)*", "_____no_output_____" ] ], [ [ "def preprocess_data(data, cols):\n \"\"\"\n This method preprocess data and does the following:\n 1. drops unecessary duplicate pairs.\n 2. drops rows with empty strings.\n 3. drops rows with more than 30 words in either the question or the answer, \n or if the an answer has less than two characters.\n Arguments:\n data: a pandas dataframe.\n cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name \n of the questions column and the second item should be the name of the column corresponding to answers.\n Returns:\n a pandas dataframe.\n \"\"\"\n \n \n # (1) Remove unecessary duplicate pairs but keep only one instance of all duplicates.\n print('Removing unecessary duplicate pairs:')\n data_len_before = len(data) # len of data before removing duplicates\n print(f\"# of examples before removing duplicates: {data_len_before}\")\n # drop duplicates\n data = data.drop_duplicates(keep='first')\n data_len_after = len(data) # len of data after removing duplicates\n print(f\"# of examples after removing duplicates: {data_len_after}\")\n print(f\"# of removed duplicates: {data_len_before-data_len_after}\")\n \n \n # (2) Drop rows with empty strings.\n print('Removing empty string rows:')\n if cols is None:\n cols = list(data.columns)\n \n data_len_before = len(data) # len of data before removing empty strings\n print(f\"# of examples before removing rows with empty question/answers: {data_len_before}\")\n # I am going to use boolean masking to filter out rows with an empty question or answer\n data = data[(data[cols[0]] != '') & (data[cols[1]] != '')]\n # also, the following row results in the same as the above.\n # data = data.query('Answer != \"\" and Question != \"\"')\n data_len_after = len(data) # len of data after removing empty strings\n print(f\"# of examples after removing with empty question/answers: {data_len_after}\")\n print(f\"# of removed empty string rows: {data_len_before-data_len_after}\")\n \n \n # (3) Drop rows with more than 30 words in either the question or the answer\n # or if the an answer has less than two characters.\n def accepted_length(qa_pair):\n q_len = len(qa_pair[0].split(' '))\n a_len = len(qa_pair[1].split(' '))\n if (q_len <= 30) & ((a_len <= 30) & (len(qa_pair[1]) > 1)):\n return True\n return False\n \n print('Removing rows with more than 30 words in either the question or the answer:')\n data_len_before = len(data) # len of data before dropping those rows (30+ words)\n print(f\"# of examples before removing rows with more than 30 words: {data_len_before}\")\n # filter out 
rows with more than 30 words\n accepted_mask = data.apply(accepted_length, axis=1)\n data = data[accepted_mask]\n data_len_after = len(data) # len of data after dropping those rows (50+ words)\n print(f\"# of examples after removing rows with more than 30 words: {data_len_after}\")\n print(f\"# of removed empty rows with more than 30 words: {data_len_before-data_len_after}\")\n \n print(\"Data preprocessing is done.\")\n \n return data", "_____no_output_____" ], [ "wordball = preprocess_data(wordball, ['Question', 'Answer'])", "Removing unecessary duplicate pairs:\n# of examples before removing duplicates: 107234\n# of examples after removing duplicates: 107144\n# of removed duplicates: 90\nRemoving empty string rows:\n# of examples before removing rows with empty question/answers: 107144\n# of examples after removing with empty question/answers: 107054\n# of removed empty string rows: 90\nRemoving rows with more than 30 words in either the question or the answer:\n# of examples before removing rows with more than 30 words: 107054\n# of examples after removing rows with more than 30 words: 101712\n# of removed empty rows with more than 30 words: 5342\nData preprocessing is done.\n" ], [ "print(f\"# of question-answer pairs we have left in the Wordball dataset: {len(wordball)}\")", "# of question-answer pairs we have left in the Wordball dataset: 101712\n" ] ], [ [ "Let's look at the characters after cleaning the data:", "_____no_output_____" ] ], [ [ "distinct_chars(wordball, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 56\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nAlphabets: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nSpecial characters: [' ', '!', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '=', '?', '@', '_']\n" ] ], [ [ "# Second Dataset\n**reddit /r/Jokes**, [here](https://www.kaggle.com/cuddlefish/reddit-rjokes#jokes_score_name_clean.csv).\n\nThis dataset consists of two files, namely:\n1. <i>jokes_score_name_clean.csv</i>: with <i>133,992</i> pairs.\n2. 
<i>all_jokes.csv</i>\n\nHowever, I'm not going to incorporate <i>all_jokes.csv</i> in the dataset because it's so messy.", "_____no_output_____" ] ], [ [ "reddit_jokes = pd.read_csv(files_path + 'jokes_score_name_clean.csv', usecols=['q', 'a'])", "_____no_output_____" ] ], [ [ "Let's rename the columns to have them aligned with the previous dataset:", "_____no_output_____" ] ], [ [ "reddit_jokes.rename(columns={'q':'Question', 'a':'Answer'}, inplace=True)", "_____no_output_____" ], [ "reddit_jokes.head()", "_____no_output_____" ], [ "print(len(reddit_jokes))", "133328\n" ], [ "distinct_chars(reddit_jokes, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 567\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '²', '³', '¹', '₂', '₄']\nAlphabets: ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'µ', 'º', 'Ä', 'Ñ', 'Ö', 'ß', 'à', 'á', 'ã', 'ä', 'å', 'æ', 'ç', 'è', 'é', 'ê', 'ë', 'ì', 'í', 'î', 'ï', 'ñ', 'ò', 'ó', 'ô', 'õ', 'ö', 'ø', 'ù', 'ú', 'û', 'ü', 'þ', 'ā', 'ē', 'ě', 'ı', 'ń', 'ō', 'œ', 'ƃ', 'Ɔ', 'ǎ', 'ǐ', 'ǒ', 'ǚ', 'ǝ', 'ɐ', 'ɑ', 'ɔ', 'ə', 'ɟ', 'ɡ', 'ɥ', 'ɪ', 'ɯ', 'ɴ', 'ɹ', 'ɾ', 'ʇ', 'ʌ', 'ʍ', 'ʎ', 'ʏ', 'ʖ', 'ʘ', 'ʞ', 'ʟ', 'ʰ', 'ʲ', 'ʳ', 'ʷ', 'ʸ', 'ˈ', 'ˢ', 'Δ', 'Π', 'Σ', 'ί', 'α', 'κ', 'λ', 'μ', 'ν', 'π', 'ρ', 'ω', 'ϱ', 'А', 'Д', 'К', 'Т', 'а', 'е', 'л', 'м', 'о', 'т', 'ш', 'я', 'Ԁ', 'א', 'ב', 'ג', 'ה', 'ו', 'ז', 'ח', 'ט', 'י', 'ך', 'כ', 'ל', 'ם', 'ן', 'נ', 'ע', 'פ', 'צ', 'ק', 'ר', 'ש', 'ת', 'ॐ', 'ಠ', 'ᴀ', 'ᴄ', 'ᴅ', 'ᴇ', 'ᴉ', 'ᴍ', 'ᴏ', 'ᴘ', 'ᴛ', 'ᴜ', 'ᵃ', 'ᵇ', 'ᵈ', 'ᵉ', 'ᵒ', 'ᵖ', 'ᵗ', 'ᵘ', 'ᵛ', 'ᶦ', 'ᶫ', 'ᶰ', 'ḱ', 'ễ', 'ツ', 'ヮ', '一', '两', '个', '为', '主', '么', '了', '人', '什', '他', '但', '你', '做', '傲', '儿', '兴', '再', '卖', '向', '呀', '呢', '咦', '咩', '啲', '喺', '地', '块', '天', '女', '她', '好', '妈', '子', '孩', '对', '小', '干', '度', '很', '感', '我', '斋', '昨', '是', '杯', '林', '果', '浮', '狗', '男', '白', '的', '真', '穷', '笑', '糖', '緊', '给', '老', '要', '说', '趣', '那', '钱', '隻', '骄', 'fi', '゚']\nSpecial characters: ['\\x08', '\\t', '\\n', '\\r', '\\x19', '\\x1c', '\\x1d', ' ', '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|', '}', '~', '\\x92', '\\x93', '\\x94', '\\x9d', '\\xa0', '¡', '¢', '£', '¥', '«', '\\xad', '®', '¯', '°', '´', '·', '»', '¾', '¿', '×', '˙', '˚', '˜', '̕', '̖', '̗', '̘', '̙', '̛', '̜', '̝', '̞', '̟', '̠', '̡', '̢', '̣', '̤', '̥', '̦', '̧', '̨', '̩', '̪', '̫', '̬', '̭', '̮', '̯', '̰', '̱', '̲', '̳', '̴', '̵', '̶', '̷', '̸', '̹', '̺', '̻', '̼', '̀', '́', 'ͅ', '͇', '͈', '͉', '͍', '͎', '͏', '͓', '͔', '͕', '͖', '͘', '͙', '͚', '͜', '͞', '͟', '͠', '͡', '͢', '҉', '\\u2000', '\\u2009', '\\u200b', '\\u200e', '\\u200f', '–', '—', '―', '‘', '’', '‚', '“', '”', '„', '†', '•', '…', '\\u202a', '\\u202c', '′', '″', '‽', '⁄', '\\u206a', '€', '₱', '℉', '™', '⅄', '←', '↑', '→', '↓', '∀', '∆', '∑', '−', '√', '∫', '≤', '⌐', '⌘', '─', '╤', '╦', '╭', '╮', '█', '■', '□', '◊', '☀', '☝', '☞', '☹', '☺', '☼', '♡', '♥', '♦', '♨', '♪', '♫', '♭', '♻', '✂', '✈', '✌', '✏', '✓', '✔', '❤', '⟹', '⠁', '⠊', '⠙', '⠝', '⠠', '⬅', '\\u3000', '。', '「', '」', '️', '︻', '\\ufeff', ',', ':', '?', '£', '�', '🅱', '🇧', '🇩', '🇪', '🇷', '🇸', '🇺', '🍞', '🍻', '🎤', '🎵', '🎶', '🎷', '🎺', '🏆', '🏢', '🐯', '👍', '💀', '😀', '😁', '😂', '😃', '😄', '😅', '😆', '😈', '😉', '😊', '😋', '😌', '😎', '😏', '😐', '😑', '😕', '😘', '😛', 
'😜', '😝', '😟', '😡', '😢', '😣', '😥', '😦', '😨', '😩', '😫', '😭', '😮', '😱', '😳', '😶', '🙂', '🙄', '🙏', '🚜', '🤓', '🤔', '🤗', '🤣', '🥁', '🦀', '🧀']\n" ] ], [ [ "## Text Preprocessing", "_____no_output_____" ] ], [ [ "reddit_jokes = reddit_jokes.applymap(str)", "_____no_output_____" ] ], [ [ "Reddit data has some special tags like <i>[removed]</i> or <i>[deleted]</i> (these two mean that the comment has been removed/deleted). Also, they're written in an inconsistent way, i.e. you may find the tag <i>[removed]</i> capitalized or lowercased.<br>\nThe next function will address reddit tags as follows:\n1. Drops rows with deleted, removed or censored tags.\n2. Replaces other tags found in text with a whitespace. *(i.e. some comments have tags like <i>[censored], [gaming], [long], [request] and [dirty]</i> and we want to omit these tags from the text)*", "_____no_output_____" ] ], [ [ "def clean_reddit_tags(data, cols):\n \"\"\"\n This function removes reddit-related tags from the data and does the following:\n 1. drops rows with deleted, removed or censored tags.\n 2. replaces other tags found in text with a whitespace. \n Arguments:\n data: a pandas dataframe.\n cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name \n of the questions column and the second item should be the name of the column corresponding to answers.\n Returns:\n a pandas dataframe.\n \"\"\"\n \n import re\n \n if cols is None:\n cols = list(data.columns)\n \n # First, I'm going to lowercase all the text to address these tags \n # however, I'm not going to alter the original dataframe because I don't want text to be lowercased.\n data_copy = data.copy()\n data_copy[cols[0]] = data_copy[cols[0]].str.lower()\n data_copy[cols[1]] = data_copy[cols[1]].str.lower()\n \n # drop rows with deleted, removed or censored tags.\n # qa_pair[0] is the question, qa_pair[1] is the answer\n mask = data_copy.apply(lambda qa_pair: \n False if (qa_pair[0]=='[removed]') | (qa_pair[0]=='[deleted]') | (qa_pair[0]=='[censored]') |\n (qa_pair[1]=='[removed]') | (qa_pair[1]=='[deleted]') | (qa_pair[1]=='[censored]')\n else True, axis=1)\n # drop the rows, notice we're using the mask to filter out those rows\n # in the original dataframe 'data', because we don't need it anymore\n data = data[mask]\n print(f\"# of rows dropped with [deleted], [removed] or [censored] tags: {mask.sum()}\")\n \n # replaces other tags found in text with a whitespace. 
\n def sub_tag(pair):\n \"\"\"\n This method substitute tags (square brackets with words inside) with whitespace.\n Arguments:\n pair: a Pandas Series, where the first item is the question and the second is the answer.\n Returns:\n pair: a Pandas Series.\n \"\"\"\n # \\[(.*?)\\] is a regex to recognize square brackets [] with anything in between\n p=re.compile(\"\\[(.*?)\\]\")\n pair[0] = re.sub(p, ' ', pair[0])\n pair[1] = re.sub(p, ' ', pair[1])\n \n return pair\n \n # substitute tags with whitespaces.\n data = data.apply(sub_tag, axis=1)\n \n return data", "_____no_output_____" ], [ "print(\"Before addressing tags:\")\nprint_question_answer(reddit_jokes, 1825, ['Question', 'Answer'])\nprint_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])\nprint_question_answer(reddit_jokes, 59924, ['Question', 'Answer'])\nprint_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])", "Before addressing tags:\nQuestion: (1825)\nHow do you piss off an entire community with one word?\nAnswer: (1825)\n[Deleted]\nQuestion: (52906)\n[Corny] What does a highlighter say when it answers the phone?\nAnswer: (52906)\nYello?\nQuestion: (59924)\nHow do you disappoint a redditor?\nAnswer: (59924)\n[removed]\nQuestion: (1489)\nEverything men know about women\nAnswer: (1489)\n[ ]\n" ] ], [ [ "**Note:** the following cell may take multiple seconds to finish.", "_____no_output_____" ] ], [ [ "reddit_jokes = clean_reddit_tags(reddit_jokes, ['Question', 'Answer'])", "# of rows dropped with [deleted], [removed] or [censored] tags: 133117\n" ], [ "reddit_jokes", "_____no_output_____" ], [ "print(\"After addressing tags:\")\n# because rows with [removed], [deleted] and [censored] tags have been dropped\n# we're not going to print the rows (index=1825, index=59924) since they contain \n# those tags, or we're going to have a KeyError\nprint_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])\nprint_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])", "After addressing tags:\nQuestion: (52906)\n What does a highlighter say when it answers the phone?\nAnswer: (52906)\nYello?\nQuestion: (1489)\nEverything men know about women\nAnswer: (1489)\n \n" ] ], [ [ "**Note:** notice the question whose index is 52906, has some leading whitespaces. That's because it had the <i>[Corny]</i> tag and the function replaced it with whitespaces. Also, the question whose index is 1489 has an empty answer and that's because of the fact that the original answer just square brackets with some whitespaces in between. We're going to address all of that next!", "_____no_output_____" ], [ "Now, let's apply the *clean_text* function on the reddit data.<br>\n**Remember:** the *clean_text* function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.", "_____no_output_____" ] ], [ [ "reddit_jokes = reddit_jokes.applymap(clean_text)", "_____no_output_____" ], [ "print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])\nprint_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])", "Question: (52906)\nwhat does a highlighter say when it answers the phone ?\nAnswer: (52906)\nyello ?\nQuestion: (1489)\neverything men know about women\nAnswer: (1489)\n\n" ] ], [ [ "Everything looks good!<br>\nNow, let's apply the *preprocess_data* function on the data.<br>\n**Remember:** the *preprocess_data* function applies the following preprocessing operations:\n1. Drops unecessary duplicate pairs (rows) but keep only one instance of all duplicates. 
*(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*\n2. Drops rows with empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset) *\n3. Drops rows with more than 30 words in either the question or the answer or if the an answer has less than two characters. *(Note: this is a hyperparameter and you can try other values.)*", "_____no_output_____" ] ], [ [ "reddit_jokes = preprocess_data(reddit_jokes, ['Question', 'Answer'])", "Removing unecessary duplicate pairs:\n# of examples before removing duplicates: 133117\n# of examples after removing duplicates: 128036\n# of removed duplicates: 5081\nRemoving empty string rows:\n# of examples before removing rows with empty question/answers: 128036\n# of examples after removing with empty question/answers: 127946\n# of removed empty string rows: 90\nRemoving rows with more than 30 words in either the question or the answer:\n# of examples before removing rows with more than 30 words: 127946\n# of examples after removing rows with more than 30 words: 89001\n# of removed empty rows with more than 30 words: 38945\nData preprocessing is done.\n" ], [ "print(f\"Number of question answer pairs in the reddit /r/Jokes dataset: {len(reddit_jokes)}\")", "Number of question answer pairs in the reddit /r/Jokes dataset: 89001\n" ], [ "distinct_chars(reddit_jokes, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 56\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nAlphabets: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nSpecial characters: [' ', '!', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '=', '?', '@', '_']\n" ] ], [ [ "# Third Dataset\n**Question-Answer Jokes**, [here](https://www.kaggle.com/jiriroz/qa-jokes).\n\nThis dataset consists of one file, namely:\n* <i>jokes_score_name_clean.csv</i>: with <i>38,269</i> pairs.", "_____no_output_____" ] ], [ [ "qa_jokes = pd.read_csv(files_path + 'jokes.csv', usecols=['Question', 'Answer'])\nqa_jokes", "_____no_output_____" ], [ "print(len(qa_jokes))", "38269\n" ], [ "distinct_chars(qa_jokes, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 237\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '³', '౪', '₄']\nAlphabets: ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'È', 'É', 'Ñ', 'ß', 'á', 'ä', 'å', 'æ', 'è', 'é', 'ê', 'ì', 'í', 'î', 'ï', 'ñ', 'ò', 'ó', 'õ', 'ö', 'ø', 'ù', 'ú', 'û', 'ü', 'Đ', 'ı', 'ō', 'œ', 'ʃ', 'ʅ', 'ʖ', 'Α', 'Μ', 'Ω', 'ά', 'ε', 'ζ', 'η', 'θ', 'κ', 'μ', 'π', 'ρ', 'ς', 'С', 'б', 'е', 'и', 'н', 'р', 'т', 'ь', 'ॐ', 'ಠ', 'ứ', 'づ', 'ツ', '丁', '二', '喲', '媽', '崇', '常', '清', '胖', '董', '這', '麼', '빵']\nSpecial characters: [' ', '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|', '}', '~', '\\xa0', '¡', '£', '¤', '©', '«', '¬', '\\xad', '®', '¯', '°', '´', '·', '»', '¿', '×', '̨', '̸', '͜', '͡', '\\u200a', '\\u200b', '\\u200e', '–', '—', '‘', '’', '“', '”', '•', '…', '\\u202a', '€', '™', '−', '√', '∞', '∫', '≠', '⌐', '─', '╤', '╦', '■', '◔', '♪', '⚡', 
'✈', '✏', '➕', '️', '︻', '\\ufeff', ',', '🎺', '🏢', '👌', '👍', '💩', '😀', '😂', '😃', '😆', '😏', '😔', '😜', '😳', '🙇', '🤘']\n" ] ], [ [ "## Text Preprocessing", "_____no_output_____" ], [ "If you look at some examples in the dataset, you notice that some examples has 'Q:' at beginning of the question and 'A:' at the beginning of the answer, so we need to get rid of these prefixes because they don't convey useful information.<br>\nYou also notice some examples where both 'Q:' and 'A:' are found in either the question or the answer, although I'm not going to omit these because they probably convey information and are part of the answer. However, some of them have 'Q:' in the question and 'Q: question A: answer' where the question in the answer is the same question, so we need to fix that.", "_____no_output_____" ] ], [ [ "def clean_qa_prefixes(data, cols):\n \"\"\"\n This function removes special prefixes ('Q:' and 'A:') found in the data.\n i.e. input=\"Q: how's your day?\" --> output=\" how's your day?\"\n Arguments:\n data: a pandas dataframe.\n cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name \n of the questions column and the second item should be the name of the column corresponding to answers.\n Returns:\n a pandas dataframe.\n \"\"\"\n def removes_prefixes(pair):\n \"\"\"\n This function removes prefixes ('Q:' and 'A:') from the question and answer.\n Examples:\n Input: qusetion=\"Q: what is your favorite Space movie?\", answer='A: Interstellar!'\n Output: qusetion=' what is your favorite Space movie?', answer=' Interstellar!'\n Input: question=\"Q: how\\'s your day?\", answer='Q: how\\'s your day? A: good, thanks.'\n Output: qusetion=\" how's your day?\", answer='good, thanks.'\n Input: qusetion='How old are you?', answer='old enough'\n Output: qusetion='How old are you?', answer='old enough'\n Arguments:\n pair: a Pandas Series, where the first item is the question and the second is the answer.\n Returns:\n pair: a Pandas Series.\n \"\"\"\n # pair[0] corresponds to the question\n # pair[1] corresponds to the answer\n # if the question contains 'Q:' and the answer contains 'A:' but doesn't contain 'Q:'\n if ('Q:' in pair[0]) and ('A:' in pair[1]) and ('Q:' not in pair[1]):\n pair[0] = pair[0].replace('Q:','')\n pair[1] = pair[1].replace('A:','')\n # if the answer contains both 'Q:' and 'A:'\n elif ('A:' in pair[1]) and ('Q:' in pair[1]):\n pair[0] = pair[0].replace('Q:','')\n # now we should check if the text between 'Q:' and 'A:' is the same text in the question (pair[0])\n # because if they are, this means that the question is repeated in the answer and we should address that.\n q_start = pair[1].find('Q:') + 2 # index of the start of the text that we want to extract\n q_end = pair[1].find('A:') # index of the end of the text that we want to extract\n q_txt = pair[1][q_start:q_end].strip()\n # if the question is repeated in the answer\n if q_txt == pair[0].strip():\n # in case the question is repeated in the answer, removes it from the answer\n pair[1] = pair[1][q_end+2:].strip()\n \n return pair\n \n return data.apply(removes_prefixes, axis=1)", "_____no_output_____" ], [ "print(\"Before removing unnecessary prefixes:\")\nprint_question_answer(qa_jokes, 44, ['Question', 'Answer'])\nprint_question_answer(qa_jokes, 22, ['Question', 'Answer'])\nprint_question_answer(qa_jokes, 31867, ['Question', 'Answer'])", "Before removing unnecessary prefixes:\nQuestion: (44)\nQ: What did the left leg say to the right leg?\nAnswer: 
(44)\nA: That one in the middle thinks he's hard.\nQuestion: (22)\nWhy does Santa have three gardens?\nAnswer: (22)\nQ: Why does Santa have three gardens? A: So he can \"hoe, hoe, hoe.\"\nQuestion: (31867)\nWhat is your favorite joke about women?\nAnswer: (31867)\nQ: Why don't women wear watches? A: Because there is a clock on the stove.\n" ], [ "qa_jokes = clean_qa_prefixes(qa_jokes, ['Question', 'Answer'])", "_____no_output_____" ], [ "print(\"After removing unnecessary prefixes:\")\nprint_question_answer(qa_jokes, 44, ['Question', 'Answer'])\nprint_question_answer(qa_jokes, 22, ['Question', 'Answer'])\nprint_question_answer(qa_jokes, 31867, ['Question', 'Answer'])", "After removing unnecessary prefixes:\nQuestion: (44)\n What did the left leg say to the right leg?\nAnswer: (44)\n That one in the middle thinks he's hard.\nQuestion: (22)\nWhy does Santa have three gardens?\nAnswer: (22)\nSo he can \"hoe, hoe, hoe.\"\nQuestion: (31867)\nWhat is your favorite joke about women?\nAnswer: (31867)\nQ: Why don't women wear watches? A: Because there is a clock on the stove.\n" ] ], [ [ "Notice that the third example both 'Q:' and 'A:' are part of the answer and conveys information.", "_____no_output_____" ], [ "Now, let's apply the *clean_text* function on the Question-Answer Jokes data.<br>\n**Remember:** the *clean_text* function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.", "_____no_output_____" ] ], [ [ "qa_jokes = qa_jokes.applymap(clean_text)", "_____no_output_____" ] ], [ [ "Now, let's apply the *preprocess_data* function on the data.<br>\n**Remember:** the *preprocess_data* function applies the following preprocessing operations:\n1. Drops unnecessary duplicate pairs (rows) but keep only one instance of all duplicates. *(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*\n2. Drops rows with an empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset) *\n3. Drops rows with more than 30 words in either the question or the answer or if the an answer has less than two characters. 
*(Note: this is a hyperparameter and you can try other values.)*", "_____no_output_____" ] ], [ [ "qa_jokes = preprocess_data(qa_jokes, ['Question', 'Answer'])", "Removing unecessary duplicate pairs:\n# of examples before removing duplicates: 38269\n# of examples after removing duplicates: 38187\n# of removed duplicates: 82\nRemoving empty string rows:\n# of examples before removing rows with empty question/answers: 38187\n# of examples after removing with empty question/answers: 38166\n# of removed empty string rows: 21\nRemoving rows with more than 30 words in either the question or the answer:\n# of examples before removing rows with more than 30 words: 38166\n# of examples after removing rows with more than 30 words: 37086\n# of removed empty rows with more than 30 words: 1080\nData preprocessing is done.\n" ], [ "print(f\"Number of question-answer pairs in the Question-Answer Jokes dataset: {len(qa_jokes)}\")", "Number of question-answer pairs in the Question-Answer Jokes dataset: 37086\n" ], [ "distinct_chars(qa_jokes, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 56\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nAlphabets: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nSpecial characters: [' ', '!', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '=', '?', '@', '_']\n" ] ], [ [ "# Putting it together\n\nLet's concatenate all the data we have to create our final dataset.", "_____no_output_____" ] ], [ [ "dataset = pd.concat([wordball, reddit_jokes, qa_jokes], ignore_index=True)\ndataset.head()", "_____no_output_____" ], [ "print(f\"Number of question-answer pairs in the dataset: {len(dataset)}\")", "Number of question-answer pairs in the dataset: 227799\n" ] ], [ [ "There may be duplicate examples in the data so let's drop them:", "_____no_output_____" ] ], [ [ "data_len_before = len(dataset) # len of data before removing duplicates\nprint(f\"# of examples before removing duplicates: {data_len_before}\")\n# drop duplicates\ndataset = dataset.drop_duplicates(keep='first')\ndata_len_after = len(dataset) # len of data after removing duplicates\nprint(f\"# of examples after removing duplicates: {data_len_after}\")\nprint(f\"# of removed duplicates: {data_len_before-data_len_after}\")", "# of examples before removing duplicates: 227799\n# of examples after removing duplicates: 175671\n# of removed duplicates: 52128\n" ] ], [ [ "Let's drop rows with NaN values if there's any:", "_____no_output_____" ] ], [ [ "dataset.dropna(inplace=True)", "_____no_output_____" ], [ "dataset", "_____no_output_____" ] ], [ [ "Let's make sure that all our cells are of the same type:", "_____no_output_____" ] ], [ [ "dataset = dataset.applymap(str)", "_____no_output_____" ], [ "print(f\"Number of question-answer pairs in the dataset: {len(dataset)}\")", "Number of question-answer pairs in the dataset: 175671\n" ], [ "distinct_chars(dataset, ['Question', 'Answer'])", "Number of distinct characters used in the dataset: 56\nDigits: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nAlphabets: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nSpecial characters: [' ', '!', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '=', '?', '@', '_']\n" ] ], [ [ "Finally, let's save the dataset:", "_____no_output_____" ] ], [ [ "dataset.to_csv(files_path + 
'/dataset.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d05a365ef03f618a87f55107190e0d45bdd70d11
9,304
ipynb
Jupyter Notebook
60_linear_algebra_2/015_System_Linear_Eq_Four_Node_Truss.ipynb
cv2316eca19a/nmisp
731a01bc687f3380ddf370d45c3a286f754b45cb
[ "BSD-3-Clause" ]
null
null
null
60_linear_algebra_2/015_System_Linear_Eq_Four_Node_Truss.ipynb
cv2316eca19a/nmisp
731a01bc687f3380ddf370d45c3a286f754b45cb
[ "BSD-3-Clause" ]
null
null
null
60_linear_algebra_2/015_System_Linear_Eq_Four_Node_Truss.ipynb
cv2316eca19a/nmisp
731a01bc687f3380ddf370d45c3a286f754b45cb
[ "BSD-3-Clause" ]
null
null
null
24.041344
236
0.450881
[ [ [ "# 선형연립방정식 사례: 간단한 트러스<br>Example of Systems of Linear Equations : Simple Truss\n\n", "_____no_output_____" ] ], [ [ "# 그래프, 수학 기능 추가\n# Add graph and math features\nimport pylab as py\nimport numpy as np\nimport numpy.linalg as nl\n# 기호 연산 기능 추가\n# Add symbolic operation capability\nimport sympy as sy\n\n", "_____no_output_____" ] ], [ [ "화살표를 그리는 함수<br>Function to draw an arrow\n\n", "_____no_output_____" ] ], [ [ "def draw_2dvec(x, y, x0=0, y0=0, color='k', name=None):\n py.quiver(x0, y0, x, y, color=color, angles='xy', scale_units='xy', scale=1)\n if name is not None:\n if not name.startswith('$'):\n vec_str = '$\\\\vec{%s}$' % name\n else:\n vec_str = name\n py.text(0.5 * x + x0, 0.5 * y + y0, vec_str)\n\n", "_____no_output_____" ] ], [ [ "정삼각형을 그리는 함수<br>Function to draw an equilateral triangle\n\n", "_____no_output_____" ] ], [ [ "def triangle_support(x, y, length):\n # https://matplotlib.org/gallery/lines_bars_and_markers/fill.html\n height = py.cos(py.radians(30)) * length\n py.fill((x, x + length*0.5, x + length*-0.5), (y, y - height, y - height))\n\n", "_____no_output_____" ] ], [ [ "## 4 절점 트러스<br>A Four Node Truss\n\n", "_____no_output_____" ], [ "다음과 같은 트러스를 생각해 보자.<br>\nLet's think about a truss as follows.<br>\n(ref: \"[Application of system of linear equations](https://www.chegg.com/homework-help/questions-and-answers/application-system-linear-equations-sure-work-matlab-problem-figure-1-shows-mechanical-str-q22676917)\", Chegg Study)\n\n", "_____no_output_____" ] ], [ [ "# 마디점 좌표 nodal point coordinates\nxy_list = [(0, 0), (1, 1), (1, 0), (2, 0)]\n\n# 각 부재의 양 끝 점 end points of each member\nconnectivity_list = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]\n\nfor k, i_j in enumerate(connectivity_list):\n i, j = i_j\n\n py.plot(\n (xy_list[i][0], xy_list[j][0]),\n (xy_list[i][1], xy_list[j][1]),\n '.-'\n )\n\n # 부재 번호 표시 Indicate member id\n py.text(0.5 * (xy_list[i][0] + xy_list[j][0]), \n 0.5 * (xy_list[i][1] + xy_list[j][1]), k + 1)\n\n# 마디점 번호 표시 Indicate node ids\nfor k, xy in enumerate(xy_list):\n py.text(xy[0], xy[1], '(%d)' % (k+1))\n \ndraw_2dvec(0, -0.5, xy_list[2][0], xy_list[2][1], name='$F_1$')\n\ntriangle_support(xy_list[0][0], xy_list[0][1], 0.25)\ntriangle_support(xy_list[3][0], xy_list[3][1], 0.25)\n\npy.axis('equal')\npy.xlim((-1, 3))\npy.ylim((-1, 2))\n# https://stackoverflow.com/questions/9295026/matplotlib-plots-removing-axis-legends-and-white-spaces\npy.axis('off')\n\npy.savefig('triangular_truss.svg')\n\n", "_____no_output_____" ] ], [ [ "모든 각은 45도 이다.<br>\nAll angles are 45 degrees.\n\n", "_____no_output_____" ], [ "$$\n\\alpha = sin\\left(\\frac{\\pi}{4}\\right) = cos\\left(\\frac{\\pi}{4}\\right)\n$$\n\n", "_____no_output_____" ], [ "각 마디에서의 힘의 평형은 다음과 같다. $f_i$ 는 $i$번째 부재의 장력이다.<br>\nForce equilibrium equations at respective nodes are as follows. 
$f_i$ is the tensile force of $i$th member.\n\n", "_____no_output_____" ], [ "$$\n\\begin{align} \n R_{1x} + \\alpha \\cdot f_{1}+f_{2} &= 0 \\\\\n R_{1y} + \\alpha \\cdot f_{1} &= 0 \\\\\n -\\alpha \\cdot f_{1}+\\alpha \\cdot f_{4} &=0 \\\\\n -\\alpha \\cdot f_{1}-f_{3}-\\alpha \\cdot f_{4} &=0 \\\\ \n -f_{2}+f_{5}&=0 \\\\ \n f_{3}&=F_{1} \\\\ \n -\\alpha \\cdot f_4 - f_5 &=0 \\\\\n \\alpha \\cdot f_4 + R_{4y} &=0 \\\\\n\\end{align}\n$$\n\n", "_____no_output_____" ], [ "행렬형태로는:<br>\nIn matrix form:\n\n", "_____no_output_____" ], [ "$$\n\\begin{bmatrix}\n1 & 0 & \\alpha & 1 & 0 & 0 & 0 & 0 \\\\\n0 & 1 & \\alpha & 0 & 0 & 0 & 0 & 0 \\\\\n0 & 0 & -\\alpha & 0 & 0 & \\alpha & 0 & 0 \\\\\n0 & 0 & -\\alpha & 0 & -1 & -\\alpha & 0 & 0 \\\\\n0 & 0 & 0 & -1 & 0 & 0 & 1 & 0 \\\\\n0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\\\\n0 & 0 & 0 & 0 & 0 & -\\alpha & -1 & 0 \\\\\n0 & 0 & 0 & 0 & 0 & \\alpha & 0 & 1 \\\\\n\\end{bmatrix}\n\\begin{pmatrix}\nR_{1x} \\\\ R_{1y} \\\\ f_1 \\\\ f_2 \\\\ f_3 \\\\ f_4 \\\\ f_5 \\\\ R_{4y}\n\\end{pmatrix}\n=\n\\begin{pmatrix}\n0 \\\\ 0 \\\\ 0 \\\\ 0 \\\\ 0 \\\\ F_1 \\\\ 0 \\\\ 0\n\\end{pmatrix}\n$$\n\n", "_____no_output_____" ] ], [ [ "alpha = py.sin(py.radians(45))\n\n", "_____no_output_____" ], [ "matrix = py.matrix([\n [1, 0, alpha, 1, 0, 0, 0, 0],\n [0, 1, alpha, 0, 0, 0, 0, 0],\n [0, 0, -alpha, 0, 0, alpha, 0, 0],\n [0, 0, -alpha, 0, -1, -alpha, 0, 0],\n [0, 0, 0, -1, 0, 0, 1, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, -alpha, -1, 0],\n [0, 0, 0, 0, 0, alpha, 0, 1],\n])\n\n", "_____no_output_____" ] ], [ [ "행렬의 계수를 계산해 보자.<br>Let's check the rank of the matrix.\n\n", "_____no_output_____" ] ], [ [ "nl.matrix_rank(matrix)\n\n", "_____no_output_____" ] ], [ [ "미지수의 갯수와 정방행렬의 계수가 같다는 것은 이 선형 연립 방정식의 해를 구할 수 있다는 뜻이다.<br>\nThe number of unknowns and the rank of the matrix are the same; we can find a root of this system of linear equations.\n\n", "_____no_output_____" ], [ "우변을 준비해 보자.<br>\nLet's prepare for the right side.\n\n", "_____no_output_____" ] ], [ [ "vector = py.matrix([[0, 0, 0, 0, 0, 100, 0, 0]]).T\n\n", "_____no_output_____" ] ], [ [ "파이썬의 확장 기능 가운데 하나인 NumPy 의 선형 대수 기능 `solve()` 를 사용하여 해를 구해 보자.<br>\nUsing `solve()` of linear algebra subpackage of `NumPy`, a Python package, let's find a solution.\n\n", "_____no_output_____" ] ], [ [ "sol = nl.solve(matrix, vector)\n\n", "_____no_output_____" ], [ "sol\n\n", "_____no_output_____" ] ], [ [ "![Triangular Truss](triangular_truss.svg)\n\n", "_____no_output_____" ], [ "## Final Bell<br>마지막 종\n\n", "_____no_output_____" ] ], [ [ "# stackoverfow.com/a/24634221\nimport os\nos.system(\"printf '\\a'\");\n\n", "_____no_output_____" ], [ "\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
d05a4f590c9b1140b1e7b6851d9bb79bcc4f66e4
13,068
ipynb
Jupyter Notebook
PythonForDataProfessionals/Python for Data Professionals/notebooks/.ipynb_checkpoints/02 Programming Basics-checkpoint.ipynb
fratei/sqlworkshops
bf05479084120856ee2f5dd8954f3d179ac5cc70
[ "MIT" ]
13
2020-04-04T02:28:17.000Z
2021-12-14T18:47:33.000Z
PythonForDataProfessionals/Python for Data Professionals/notebooks/.ipynb_checkpoints/02 Programming Basics-checkpoint.ipynb
tiagomqsantos/sqlworkshops
92658e42a95bf6388566f78395c6fd68f47be9e8
[ "MIT" ]
null
null
null
PythonForDataProfessionals/Python for Data Professionals/notebooks/.ipynb_checkpoints/02 Programming Basics-checkpoint.ipynb
tiagomqsantos/sqlworkshops
92658e42a95bf6388566f78395c6fd68f47be9e8
[ "MIT" ]
12
2020-04-02T15:59:06.000Z
2021-11-10T08:25:25.000Z
37.230769
479
0.531221
[ [ [ "![](../graphics/solutions-microsoft-logo-small.png)\n\n# Python for Data Professionals\n\n## 02 Programming Basics\n\n<p style=\"border-bottom: 1px solid lightgrey;\"></p>\n\n<dl>\n <dt>Course Outline</dt>\n <dt>1 - Overview and Course Setup</dt>\n <dt>2 - Programming Basics <i>(This section)</i></dt>\n <dd>2.1 - Getting help</dd>\n <dd>2.2 Code Syntax and Structure</dd>\n <dd>2.3 Variables<dd>\n <dd>2.4 Operations and Functions<dd>\n <dt>3 Working with Data</dt>\n <dt>4 Deployment and Environments</dt>\n<dl>\n\n<p style=\"border-bottom: 1px solid lightgrey;\"></p>", "_____no_output_____" ], [ "## Programming Basics Overview\n\nFrom here on out, you'll focus on using Python in programming mode - you'll write code that you run from an IDE or a calling environment, not interactively from the command-line. As you work through this explanation, copy the code you see and run it to see the results. After you work through these copy-and-paste examples, you'll create your own code in the Activities that follow each section.", "_____no_output_____" ], [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/cortanalogo.png\"><b>2.1 - Getting help</b></p>\n\nThe very first thing you should learn in any language is how to get help. You can [find the help documents on-line](https://docs.python.org/3/index.html), or simply type\n \n`help()`\n \nin your code. For help on a specific topic, put the topic in the parenthesis:\n \n `help(str)`\n\n To see a list of topics, type \n\n `help(topics)`", "_____no_output_____" ] ], [ [ "# Try it:", "_____no_output_____" ] ], [ [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/cortanalogo.png\"><b>2.2 Code Syntax and Structure</b></p>\n\nLet's cover a few basics about how Python code is written. (For a full discussion, check out the [Style Guide for Python, called PEP 8](https://www.python.org/dev/peps/pep-0008/) ) Let's use the \"Zen of Python\" rules from Tim Peters for this course:\n\n<pre>\n\n Beautiful is better than ugly.\n Explicit is better than implicit.\n Simple is better than complex.\n Complex is better than complicated.\n Flat is better than nested.\n Sparse is better than dense.\n Readability counts.\n Special cases aren't special enough to break the rules.\n Although practicality beats purity.\n Errors should never pass silently.\n Unless explicitly silenced.\n In the face of ambiguity, refuse the temptation to guess.\n There should be one-- and preferably only one --obvious way to do it.\n Although that way may not be obvious at first unless you're Dutch.\n Now is better than never.\n Although never is often better than right now.\n If the implementation is hard to explain, it's a bad idea.\n If the implementation is easy to explain, it may be a good idea.\n Namespaces are one honking great idea -- let's do more of those!\n --Tim Peters\n\n</pre>\n\nIn general, use standard coding practices - don't use keywords for variables, be consistent in your naming (camel-case, lower-case, etc.), comment your code clearly, and understand the general syntax of your language, and follow the principles above. But the most important tip is to at least read the PEP 8 and decide for yourself how well that fits into your Zen.\n\nThere is one hard-and-fast rule for Python that you *do* need to be aware of: indentation. You **must** indent your code for classes, functions (or methods), loops, conditions, and lists. 
You can use a tab or four spaces (spaces are the accepted way to do it) but in any case, you have to be consistent. If you use tabs, you always use tabs. If you use spaces, you have to use that throughout. It's best if you set your IDE to handle that for you, whichever way you go.\n\nPython code files have an extension of `.py`. \n\nComments in Python start with the hash-tag: `#`. There are no block comments (and this makes us all sad) so each line you want to comment must have a tag in front of that line. Keep the lines short (80 characters or so) so that they don't fall off a single-line display like at the command line.", "_____no_output_____" ], [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/checkbox.png\"><b>2.3 Variables</b></p>\n\nVariables stand in for replaceable values. Python is not strongly-typed, meaning you can just declare a variable name and set it to a value at the same time, and Python will try and guess what data type you want. You use an `=` sign to assign values, and `==` to compare things.\n\nQuotes \\\" or ticks \\' are fine, just be consistent.\n\n`# There are some keywords to be aware of, but x and y are always good choices.`\n\n`x = \"Buck\" # I'm a string.`\n\n`type(x)`\n\n`y = 10 # I'm an integer.`\n\n`type(y)`\n\nTo change the type of a value, just re-enter something else:\n\n`x = \"Buck\" # I'm a string.`\n\n`type(x)`\n\n`x = 10 # Now I'm an integer.`\n\n`type(x)`\n\nOr cast it By implicitly declaring the conversion:\n\n`x = \"10\"`\n\n`type(x)`\n\n`print int(x)`\n\nTo concatenate string values, use the `+` sign:\n\n`x = \"Buck\"`\n\n`y = \" Woody\"`\n\n`print(x + y)`", "_____no_output_____" ] ], [ [ "# Try it:\n", "_____no_output_____" ] ], [ [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/checkbox.png\"><b>2.4 Operations and Functions</b></p>\n\nPython has the following operators:\n\n Arithmetic Operators\n Comparison (Relational) Operators\n Assignment Operators\n Logical Operators\n Bitwise Operators\n Membership Operators\n Identity Operators\n\nYou have the standard operators and functions from most every language. Here are some of the tokens:\n\n<pre>\n\n != *= << ^ \n \" + <<= ^= \n \"\"\" += <= `\n % , <> __\n %= - == \n & -= > b\" \n &= . >= b' \n ' ... >> j \n ''' / >>= r\" \n ( // @ r' \n ) //= J |'\n * /= [ |= \n ** : \\ ~ \n **= < ] \n\n</pre>\n\nWait...that's it? That's all you're going to tell me? *(Hint: use what you've learned):*\n\n`help('symbols')`\n\nWalk through each of these operators carefully - you'll use them when you work with data in the next module.\n", "_____no_output_____" ] ], [ [ "# Try it:", "_____no_output_____" ] ], [ [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/aml-logo.png\"><b>Activity - Programming basics</b></p>\n\nOpen the **02_ProgrammingBasics.py** file and run the code you see there. The exercises will be marked out using comments:\n\n`# <TODO> - Section Number`", "_____no_output_____" ] ], [ [ "# 02_ProgrammingBasics.py\n# Purpose: General Programming exercises for Python \n# Author: Buck Woody\n# Credits and Sources: Inline\n# Last Updated: 27 June 2018\n\n# 2.1 Getting Help\nhelp()\nhelp(str)\n\n# <TODO> - Write code to find help on help\n\n# 2.2 Code Syntax and Structure\n\n# <TODO> - Python uses spaces to indicate code blocks. Fix the code below:\nx=10\ny=5\nif x > y:\nprint(str(x) + \" is greater than \" + str(y))\n\n# <TODO> - Arguments on first line are forbidden when not using vertical alignment. 
Fix this code:\nfoo = long_function_name(var_one, var_two,\n var_three, var_four)\n\n# <TODO> operators sit far away from their operands. Fix this code:\nincome = (gross_wages +\n taxable_interest +\n (dividends - qualified_dividends) -\n ira_deduction -\n student_loan_interest)\n\n# <TODO> - The import statement should use separate lines for each effort. You can fix the code below \n# using separate lines or by using the \"from\" statement:\nimport sys, os\n\n# <TODO> - The following code has extra spaces in the wrong places. Fix this code:\ni=i+1\nsubmitted +=1\nx = x * 2 - 1\nhypot2 = x * x + y * y\nc = (a + b) * (a - b)\n\n# 2.3 Variables \n\n# <TODO> - Add a line below x=3 that changes the variable x from int to a string\nx=3\ntype(x)\n\n# <TODO> - Write code that prints the string \"This class is awesome\" using variables:\nx=\"is awesome\"\ny=\"This Class\"\n\n# 2.4 Operations and Functions\n\n# <TODO> - Use some basic operators to write the following code:\n# Assign two variables\n# Add them\n# Subtract 20 from each, add those values together, save that to a new variable\n# Create a new string variable with the text \"The result of my operations are: \"\n# Print out a single string on the screen with the result of the variables \n# showing that result. \n\n# EOF: 02_ProgrammingBasics.py", "_____no_output_____" ] ], [ [ "<p><img style=\"float: left; margin: 0px 15px 15px 0px;\" src=\"../graphics/thinking.jpg\"><b>For Further Study</b></p>\n\n- The PEP - https://www.python.org/dev/peps/pep-0008/\n- Introduction to the Python Coding Style - http://stackabuse.com/introduction-to-the-python-coding-style/\n- The Microsoft Tutorial and samples for Python - https://code.visualstudio.com/docs/languages/python \n- Coding requirements and standards - PEP - https://www.python.org/dev/peps/pep-0008/\n- Another free online self-paced course - https://www.w3schools.com/python/default.asp \n\nNext, Continue to *03 Working with Data*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d05a6251aec63346e2c7f09851d2b1493d00d679
86,944
ipynb
Jupyter Notebook
Data Science Academy/Python Fundamentos/Cap01/JupyterNotebook-ManualUsuario.ipynb
tobraga/Cursos
91fd430115999c8d011c45fd13dbf7021bce833e
[ "MIT" ]
null
null
null
Data Science Academy/Python Fundamentos/Cap01/JupyterNotebook-ManualUsuario.ipynb
tobraga/Cursos
91fd430115999c8d011c45fd13dbf7021bce833e
[ "MIT" ]
null
null
null
Data Science Academy/Python Fundamentos/Cap01/JupyterNotebook-ManualUsuario.ipynb
tobraga/Cursos
91fd430115999c8d011c45fd13dbf7021bce833e
[ "MIT" ]
null
null
null
36.469799
1,095
0.618226
[ [ [ "# Manual Jupyter Notebook:\n\nhttps://athena.brynmawr.edu/jupyter/hub/dblank/public/Jupyter%20Notebook%20Users%20Manual.ipynb", "_____no_output_____" ], [ "#Jupyter Notebook Users Manual\n\nThis page describes the functionality of the [Jupyter](http://jupyter.org) electronic document system. Jupyter documents are called \"notebooks\" and can be seen as many things at once. For example, notebooks allow:\n\n* creation in a **standard web browser**\n* direct **sharing**\n* using **text with styles** (such as italics and titles) to be explicitly marked using a [wikitext language](http://en.wikipedia.org/wiki/Wiki_markup)\n* easy creation and display of beautiful **equations**\n* creation and execution of interactive embedded **computer programs**\n* easy creation and display of **interactive visualizations**\n\nJupyter notebooks (previously called \"IPython notebooks\") are thus interesting and useful to different groups of people:\n\n* readers who want to view and execute computer programs\n* authors who want to create executable documents or documents with visualizations\n\n<hr size=\"5\"/>", "_____no_output_____" ], [ "###Table of Contents\n* [1. Getting to Know your Jupyter Notebook's Toolbar](#1.-Getting-to-Know-your-Jupyter-Notebook's-Toolbar)\n* [2. Different Kinds of Cells](#2.-Different-Kinds-of-Cells)\n\t* [2.1 Code Cells](#2.1-Code-Cells)\n\t\t* [2.1.1 Code Cell Layout](#2.1.1-Code-Cell-Layout)\n\t\t\t* [2.1.1.1 Row Configuration (Default Setting)](#2.1.1.1-Row-Configuration-%28Default-Setting%29)\n\t\t\t* [2.1.1.2 Cell Tabbing](#2.1.1.2-Cell-Tabbing)\n\t\t\t* [2.1.1.3 Column Configuration](#2.1.1.3-Column-Configuration)\n\t* [2.2 Markdown Cells](#2.2-Markdown-Cells)\n\t* [2.3 Raw Cells](#2.3-Raw-Cells)\n\t* [2.4 Header Cells](#2.4-Header-Cells)\n\t\t* [2.4.1 Linking](#2.4.1-Linking)\n\t\t* [2.4.2 Automatic Section Numbering and Table of Contents Support](#2.4.2-Automatic-Section-Numbering-and-Table-of-Contents-Support)\n\t\t\t* [2.4.2.1 Automatic Section Numbering](#2.4.2.1-Automatic-Section-Numbering)\n\t\t\t* [2.4.2.2 Table of Contents Support](#2.4.2.2-Table-of-Contents-Support)\n\t\t\t* [2.4.2.3 Using Both Automatic Section Numbering and Table of Contents Support](#2.4.2.3-Using-Both-Automatic-Section-Numbering-and-Table-of-Contents-Support)\n* [3. Keyboard Shortcuts](#3.-Keyboard-Shortcuts)\n* [4. 
Using Markdown Cells for Writing](#4.-Using-Markdown-Cells-for-Writing)\n\t* [4.1 Block Elements](#4.1-Block-Elements)\n\t\t* [4.1.1 Paragraph Breaks](#4.1.1-Paragraph-Breaks)\n\t\t* [4.1.2 Line Breaks](#4.1.2-Line-Breaks)\n\t\t\t* [4.1.2.1 Hard-Wrapping and Soft-Wrapping](#4.1.2.1-Hard-Wrapping-and-Soft-Wrapping)\n\t\t\t* [4.1.2.2 Soft-Wrapping](#4.1.2.2-Soft-Wrapping)\n\t\t\t* [4.1.2.3 Hard-Wrapping](#4.1.2.3-Hard-Wrapping)\n\t\t* [4.1.3 Headers](#4.1.3-Headers)\n\t\t* [4.1.4 Block Quotes](#4.1.4-Block-Quotes)\n\t\t\t* [4.1.4.1 Standard Block Quoting](#4.1.4.1-Standard-Block-Quoting)\n\t\t\t* [4.1.4.2 Nested Block Quoting](#4.1.4.2-Nested-Block-Quoting)\n\t\t* [4.1.5 Lists](#4.1.5-Lists)\n\t\t\t* [4.1.5.1 Ordered Lists](#4.1.5.1-Ordered-Lists)\n\t\t\t* [4.1.5.2 Bulleted Lists](#4.1.5.2-Bulleted-Lists)\n\t\t* [4.1.6 Section Breaks](#4.1.6-Section-Breaks)\n\t* [4.2 Backslash Escape](#4.2-Backslash-Escape)\n\t* [4.3 Hyperlinks](#4.3-Hyperlinks)\n\t\t* [4.3.1 Automatic Links](#4.3.1-Automatic-Links)\n\t\t* [4.3.2 Standard Links](#4.3.2-Standard-Links)\n\t\t* [4.3.3 Standard Links With Mouse-Over Titles](#4.3.3-Standard-Links-With-Mouse-Over-Titles)\n\t\t* [4.3.4 Reference Links](#4.3.4-Reference-Links)\n\t\t* [4.3.5 Notebook-Internal Links](#4.3.5-Notebook-Internal-Links)\n\t\t\t* [4.3.5.1 Standard Notebook-Internal Links Without Mouse-Over Titles](#4.3.5.1-Standard-Notebook-Internal-Links-Without-Mouse-Over-Titles)\n\t\t\t* [4.3.5.2 Standard Notebook-Internal Links With Mouse-Over Titles](#4.3.5.2-Standard-Notebook-Internal-Links-With-Mouse-Over-Titles)\n\t\t\t* [4.3.5.3 Reference-Style Notebook-Internal Links](#4.3.5.3-Reference-Style-Notebook-Internal-Links)\n\t* [4.4 Tables](#4.4-Tables)\n\t\t* [4.4.1 Cell Justification](#4.4.1-Cell-Justification)\n\t* [4.5 Style and Emphasis](#4.5-Style-and-Emphasis)\n\t* [4.6 Other Characters](#4.6-Other-Characters)\n\t* [4.7 Including Code Examples](#4.7-Including-Code-Examples)\n\t* [4.8 Images](#4.8-Images)\n\t\t* [4.8.1 Images from the Internet](#4.8.1-Images-from-the-Internet)\n\t\t\t* [4.8.1.1 Reference-Style Images from the Internet](#4.8.1.1-Reference-Style-Images-from-the-Internet)\n\t* [4.9 LaTeX Math](#4.9-LaTeX-Math)\n* [5. Bibliographic Support](#5.-Bibliographic-Support)\n\t* [5.1 Creating a Bibtex Database](#5.1-Creating-a-Bibtex-Database)\n\t\t* [5.1.1 External Bibliographic Databases](#5.1.1-External-Bibliographic-Databases)\n\t\t* [5.1.2 Internal Bibliographic Databases](#5.1.2-Internal-Bibliographic-Databases)\n\t\t\t* [5.1.2.1 Hiding Your Internal Database](#5.1.2.1-Hiding-Your-Internal-Database)\n\t\t* [5.1.3 Formatting Bibtex Entries](#5.1.3-Formatting-Bibtex-Entries)\n\t* [5.2 Cite Commands and Citation IDs](#5.2-Cite-Commands-and-Citation-IDs)\n* [6. Turning Your Jupyter Notebook into a Slideshow](#6.-Turning-Your-Jupyter-Notebook-into-a-Slideshow)\n", "_____no_output_____" ], [ "# 1. Getting to Know your Jupyter Notebook's Toolbar", "_____no_output_____" ], [ "At the top of your Jupyter Notebook window there is a toolbar. It looks like this:", "_____no_output_____" ], [ "![](images/jupytertoolbar.png)", "_____no_output_____" ], [ "Below is a table which helpfully pairs a picture of each of the items in your toolbar with a corresponding explanation of its function. ", "_____no_output_____" ], [ "Button|Function\n-|-\n![](images/jupytertoolbarsave.png)|This is your save button. You can click this button to save your notebook at any time, though keep in mind that Jupyter Notebooks automatically save your progress very frequently. 
\n![](images/jupytertoolbarnewcell.png)|This is the new cell button. You can click this button any time you want a new cell in your Jupyter Notebook. \n![](images/jupytertoolbarcutcell.png)|This is the cut cell button. If you click this button, the cell you currently have selected will be deleted from your Notebook. \n![](images/jupytertoolbarcopycell.png)|This is the copy cell button. If you click this button, the currently selected cell will be duplicated and stored in your clipboard. \n![](images/jupytertoolbarpastecell.png)|This is the paste button. It allows you to paste the duplicated cell from your clipboard into your notebook. \n![](images/jupytertoolbarupdown.png)|These buttons allow you to move the location of a selected cell within a Notebook. Simply select the cell you wish to move and click either the up or down button until the cell is in the location you want it to be.\n![](images/jupytertoolbarrun.png)|This button will \"run\" your cell, meaning that it will interpret your input and render the output in a way that depends on [what kind of cell] [cell kind] you're using. \n![](images/jupytertoolbarstop.png)|This is the stop button. Clicking this button will stop your cell from continuing to run. This tool can be useful if you are trying to execute more complicated code, which can sometimes take a while, and you want to edit the cell before waiting for it to finish rendering. \n![](images/jupytertoolbarrestartkernel.png)|This is the restart kernel button. See your kernel documentation for more information.\n![](images/jupytertoolbarcellkind.png)|This is a drop down menu which allows you to tell your Notebook how you want it to interpret any given cell. You can read more about the [different kinds of cells] [cell kind] in the following section. \n![](images/jupytertoolbartoolbartype.png)|Individual cells can have their own toolbars. This is a drop down menu from which you can select the type of toolbar that you'd like to use with the cells in your Notebook. Some of the options in the cell toolbar menu will only work in [certain kinds of cells][cell kind]. \"None,\" which is how you specify that you do not want any cell toolbars, is the default setting. If you select \"Edit Metadata,\" a toolbar that allows you to edit data about [Code Cells][code cells] directly will appear in the corner of all the Code cells in your notebook. If you select \"Raw Cell Format,\" a tool bar that gives you several formatting options will appear in the corner of all your [Raw Cells][raw cells]. If you want to view and present your notebook as a slideshow, you can select \"Slideshow\" and a toolbar that enables you to organize your cells into slides, sub-slides, and slide fragments will appear in the corner of every cell. Go to [this section][slideshow] for more information on how to create a slideshow out of your Jupyter Notebook. \n![](images/jupytertoolbarsectionmove.png)|These buttons allow you to move the location of an entire section within a Notebook. Simply select the Header Cell for the section or subsection you wish to move and click either the up or down button until the section is in the location you want it to be. If you have used [Automatic Section Numbering][section numbering] or [Table of Contents Support][table of contents], remember to rerun those tools so that your section numbers or table of contents reflects your Notebook's new organization. \n![](images/jupytertoolbarsectionnumbering.png)|Clicking this button will automatically number your Notebook's sections. 
For more information, check out the Reference Guide's [section on Automatic Section Numbering][section numbering].\n![](images/jupytertoolbartableofcontents.png)|Clicking this button will generate a table of contents using the titles you've given your Notebook's sections. For more information, check out the Reference Guide's [section on Table of Contents Support][table of contents].\n![](images/jupytertoolbarbib.png)|Clicking this button will search your document for [cite commands][] and automatically generate in-text citations as well as a references cell at the end of your Notebook. For more information, you can read the Reference Guide's [section on Bibliographic Support][bib support].\n![](images/jupytertoolbartab.png)|Clicking this button will toggle [cell tabbing][], which you can learn more about in the Reference Guide's [section on the layout options for Code Cells][cell layout].\n![](images/jupytertoolbarcollumn.png)|Clicking this button will toggle the [column configuration][] for Code Cells, which you can learn more about in the Reference Guide's [section on the layout options for Code Cells][cell layout].\n![](images/jupytertoolbarspellcheck.png)|Clicking this button will toggle spell checking. Spell checking only works in unrendered [Markdown Cells][] and [Header Cells][]. When spell checking is on, all incorrectly spelled words will be underlined with a red squiggle. Keep in mind that the dictionary cannot tell what are [Markdown][md writing] commands and what aren't, so it will occasionally underline a correctly spelled word surrounded by asterisks, brackets, or other symbols that have specific meaning in Markdown. \n\n\n[cell kind]: #2.-Different-Kinds-of-Cells \"Different Kinds of Cells\"\n[code cells]: #2.1-Code-Cells \"Code Cells\"\n[raw cells]: #2.3-Raw-Cells \"Raw Cells\"\n[slideshow]: #6.-Turning-Your-Jupyter-Notebook-into-a-Slideshow \"Turning Your Jupyter Notebook Into a Slideshow\"\n[section numbering]: #2.4.2.1-Automatic-Section-Numbering\n[table of contents]: #2.4.2.2-Table-of-Contents-Support\n[cell tabbing]: #2.1.1.2-Cell-Tabbing\n[cell layout]: #2.1.1-Code-Cell-Layout\n[bib support]: #5.-Bibliographic-Support\n[cite commands]: #5.2-Cite-Commands-and-Citation-IDs\n[md writing]: #4.-Using-Markdown-Cells-for-Writing\n[column configuration]: #2.1.1.3-Column-Configuration\n[Markdown Cells]: #2.2-Markdown-Cells\n[Header Cells]: #2.4-Header-Cells\n", "_____no_output_____" ], [ "# 2. Different Kinds of Cells", "_____no_output_____" ], [ "There are essentially four kinds of cells in your Jupyter notebook: Code Cells, Markdown Cells, Raw Cells, and Header Cells, though there are six levels of Header Cells. ", "_____no_output_____" ], [ "## 2.1 Code Cells", "_____no_output_____" ], [ "By default, Jupyter Notebooks' Code Cells will execute Python. Jupyter Notebooks generally also support JavaScript, HTML, and Bash commands. For a more comprehensive list, see your Kernel's documentation. ", "_____no_output_____" ], [ "### 2.1.1 Code Cell Layout", "_____no_output_____" ], [ "Code cells have both an input and an output component. You can view these components in three different ways. ", "_____no_output_____" ], [ "#### 2.1.1.1 Row Configuration (Default Setting)", "_____no_output_____" ], [ "Unless you specify otherwise, your Code Cells will always be configured this way, with both the input and output components appearing as horizontal rows and with the input above the output. 
Below is an example of a Code Cell in this default setting:", "_____no_output_____" ] ], [ [ "2 + 3", "_____no_output_____" ] ], [ [ "#### 2.1.1.2 Cell Tabbing", "_____no_output_____" ], [ "Cell tabbing allows you to look at the input and output components of a cell separately. It also allows you to hide either component behind the other, which can be usefull when creating visualizations of data. Below is an example of a tabbed Code Cell:", "_____no_output_____" ] ], [ [ "2+3", "_____no_output_____" ] ], [ [ "#### 2.1.1.3 Column Configuration", "_____no_output_____" ], [ "Like the row configuration, the column layout option allows you to look at both the input and the output components at once. In the column layout, however, the two components appear beside one another, with the input on the left and the output on the right. Below is an example of a Code Cell in the column configuration:", "_____no_output_____" ] ], [ [ "2+3", "_____no_output_____" ] ], [ [ "## 2.2 Markdown Cells", "_____no_output_____" ], [ "In Jupyter Notebooks, Markdown Cells are the easiest way to write and format text. For a more thorough explanation of how to write in Markdown cells, refer to [this section of the guide][writing markdown].\n\n[writing markdown]: #4.-Using-Markdown-Cells-for-Writing \"Using Markdown Cells for Writing\"\n\n", "_____no_output_____" ], [ "## 2.3 Raw Cells", "_____no_output_____" ], [ "Raw Cells, unlike all other Jupyter Notebook cells, have no input-output distinction. This means that Raw Cells cannot be rendered into anything other than what they already are. If you click the run button in your tool bar with a Raw Cell selected, the cell will remain exactly as is and your Jupyter Notebook will automatically select the cell directly below it. Raw cells have no style options, just the same monospace font that you use in all other unrendered Notebook cells. You cannot bold, italicize, or enlarge any text or characters in a Raw Cell. \n\nBecause they have no rendered form, Raw Cells are mainly used to create examples. If you save and close your Notebook and then reopen it, all of the Code, Markdown, and Header Cells will automatically render in whatever form you left them when you first closed the document. This means that if you wanted to preserve the unrendered version of a cell, say if you were writing a computer science paper and needed code examples, or if you were writing [documentation on how to use Markdown] [writing markdown] and needed to demonstrate what input would yield which output, then you might want to use a Raw Cell to make sure your examples stayed in their most useful form. \n\n[writing markdown]: #4.-Using-Markdown-Cells-for-Writing \"Using Markdown Cells for Writing\"", "_____no_output_____" ], [ "## 2.4 Header Cells", "_____no_output_____" ], [ "While it is possible to organize your document using [Markdown headers][], Header Cells provide a more deeply structural organization for your Notebook and thus there are several advantages to using them. \n\n[Markdown headers]: #4.1.3-Headers \"Headers\" ", "_____no_output_____" ], [ "### 2.4.1 Linking", "_____no_output_____" ], [ "Header Cells have specific locations inside your Notebook. 
This means you can use them to [create Notebook-internal links](#4.3.5-Notebook-Internal-Links \"Notebook-Internal Links\").", "_____no_output_____" ], [ "### 2.4.2 Automatic Section Numbering and Table of Contents Support", "_____no_output_____" ], [ "Your Jupyter Notebook has two helpful tools that utilize the structural organization that Header Cells give your document: automatic section numbering and table of contents generation. ", "_____no_output_____" ], [ "#### 2.4.2.1 Automatic Section Numbering", "_____no_output_____" ], [ "Suppose you are writing a paper and, as is prone to happening when you have a lot of complicated thoughts buzzing around your brain, you've reorganized your ideas several times. Automatic section numbering will go through your Notebook and number your sections and subsections as designated by your Header Cells. This means that if you've moved one or more big sections around several times, you won't have to go through your paper and renumber it, as well as all its subsections, yourself.\n\n\n\n**Notes:** Automatic Section Numbering is a tri-toggling tool, so when you click the Number Sections button one of three actions will occur: Automatic Section Numbering will number your sections, correct inconsistent numbering, or unnumber your sections (if all of your sections are already consistently and correctly numbered). \n\nSo, even if you have previously numbered your sections, Automatic Section Numbering will go through your document, delete the current section numbers, and replace them with the correct number in a linear sequence. This means that if your third section was once your second, Automatic Section Numbering will delete the \"2\" in front of your section's name and replace it with a \"3.\" \n\nWhile this function saves you a lot of time, it creates one limitation. Maybe you're writing a paper about children's books and one of the books you're discussing is called **`2 Cats`**. You've unsurprisingly titled the section where you summarize and analyze this book **`2 Cats`**. Automatic Section Numbering will assume the number 2 is section information and delete it, leaving just the title **`Cats`** behind. If you bold, italicize, or place the title of the section inside quotes, however, the entire section title will be preserved without any trouble. It should also be noted that even if you must title a section with a number occurring before any letters and you do not want to bold it, italicize it, or place it inside quotes, then you can always run Automatic Section Numbering and then go to that section and retype its name by hand. \n\nBecause Automatic Section Numbering uses your header cells, its performance relies somewhat on the clarity of your organization. If you have two sections that begin with Header 1 Cells in your paper, and each of the sections has two subsections that begin with Header 2 Cells, Automatic Section Numbering will number them 1, 1.1, 1.2, 2, 2.1, and 2.2 respectively. If, however, you have used a Header 3 Cell to indicate the beginning of what would have been section 2.1, Automatic Section Numbering will number that section 2.0.1 and an error message will appear telling you that \"You placed a Header 3 cell under a Header 2 Cell in section 2\". 
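To make the numbering scheme concrete, here is a rough Python sketch of how hierarchical numbers can be assigned from a list of header levels. It is only an illustration written for this guide (the function name and sample data are invented), not the actual implementation behind the Number Sections button:

```python
def number_sections(header_levels):
    """Assign hierarchical section numbers to a sequence of header levels
    (1 for Header 1, 2 for Header 2, and so on), in document order."""
    counters = []   # counters[i] holds the running count at depth i + 1
    numbers = []
    for level in header_levels:
        while len(counters) < level:   # a deeper header than anything seen so far
            counters.append(0)
        del counters[level:]           # a new section resets its subsections
        counters[level - 1] += 1
        numbers.append(".".join(str(c) for c in counters))
    return numbers

# Two Header 1 sections, each with two Header 2 subsections:
print(number_sections([1, 2, 2, 1, 2, 2]))  # ['1', '1.1', '1.2', '2', '2.1', '2.2']

# A Header 3 cell placed directly under a Header 2 cell produces 2.0.1:
print(number_sections([1, 2, 2, 1, 3]))     # ['1', '1.1', '1.2', '2', '2.0.1']
```

The actual tool operates on your Header Cells directly; this sketch only mirrors the numbering behavior described above.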
Similarly, if you begin your paper with any Header Cell smaller than a Header 1, say a Header 3 Cell, then Automatic Section Numbering will number your first section 0.0.3 and an error message will appear telling you that \"Notebook begins with a Header 3 Cell.\"\n", "_____no_output_____" ], [ "#### 2.4.2.2 Table of Contents Support", "_____no_output_____" ], [ "The Table of Contents tool will automatically generate a table of contents for your paper by taking all your Header Cell titles and ordering them in a list, which it will place in a new cell at the very beginning of your Notebook. Because your Notebook does not utilize formal page breaks or numbers, each listed section will be hyperlinked to the actual section within your document.\n\n**Notes:** Because Table of Contents Support uses your header cells, its performance relies somewhat on the clarity of your organization. If you have two sections that begin with Header 1 Cells in your paper, and each of the sections has two subsections that begin with Header 2 Cells, Table of Contents will order them in the following way:\n\n* 1.\n * 1.1\n * 1.2\n* 2.\n * 2.1\n * 2.2\n \n\nIf, however, you have used a Header 3 Cell to indicate the beginning of what would have been section 2.1, Table of Contents Support will insert a dummy line so that your table of contents looks like this:\n\n\n* 1.\n * 1.1\n * 1.2\n* 2.\n * &nbsp; \n * 2.0.1\n * 2.2\n\n", "_____no_output_____" ], [ "#### 2.4.2.3 Using Both Automatic Section Numbering and Table of Contents Support", "_____no_output_____" ], [ "Automatic Section Numbering will always update every aspect of your notebook that is dependent on the title of one or more of your sections. This means that it will automatically correct an existing table of contents and all of your Notebook-internal links to reflect the new numbered section titles.\n", "_____no_output_____" ], [ "# 3. Keyboard Shortcuts", "_____no_output_____" ], [ "Jupyter Notebooks support many helpful keyboard shortcuts, including ones for most of the buttons in [your toolbar][]. To view these shortcuts, you can click the help menu and then select Keyboard Shortcuts, as pictured below. \n\n[your toolbar]: #1.-Getting-to-Know-your-Jupyter-Notebook's-Toolbar \"Getting to know Your Jupyter Notebook's Toolbar\"", "_____no_output_____" ], [ "![](images/keyboardshortcuts.png)", "_____no_output_____" ], [ "# 4. Using Markdown Cells for Writing", "_____no_output_____" ], [ "**Why aren't there font and font size selection drop down menus, buttons I can press to bold and italicize my text, or other advanced style options in my Notebook?**", "_____no_output_____" ], [ "When you use Microsoft Word, Google Docs, Apple Pages, Open Office, or any other word processing software, you generally use your mouse to select various style options, like line spacing, font size, font color, paragraph format, etc. This kind of system is often described as a WYSIWYG (What You See Is What You Get) interface. This means that the input (what you tell the computer) exactly matches the output (what the computer gives back to you). If you type the letter **`G`**, highlight it, select the color green and up the font size to 64 pt, your word processor will show you a fairly large green colored letter **`G`**. And if you print out that document you will print out a fairly large green colored letter **`G`**. \n\nThis Notebook, however, does not use a WYSIWYG interface. Instead it uses something called a \"[markup Language][]\". 
When you use a a markup language, your input does not necessarily exactly equal your output.\n\n\n[markup language]: http://en.wikipedia.org/wiki/Markup_language \"Wikipedia Article on Markup\"\n\n", "_____no_output_____" ], [ "For example, if I type \"#Header 1\" at the beginning of a cell, but then press Shift-Enter (or click the play button at the top of the window), this notebook will turn my input into a somewhat different output in the following way:", "_____no_output_____" ], [ "<pre>\n#Header 1\n</pre>", "_____no_output_____" ], [ "#Header 1", "_____no_output_____" ], [ "And if I type \"##Header 2\" (at the beginning of a cell), this notebook will turn that input into another output:", "_____no_output_____" ], [ "<pre>\n##Header 2\n</pre>", "_____no_output_____" ], [ "##Header 2", "_____no_output_____" ], [ "In these examples, the hashtags are markers which tell the Notebook how to typeset the text. There are many markup languages, but one family, or perhaps guiding philosophy, of markup languages is called \"Markdown,\" named somewhat jokingly for its simplicity. Your Notebook uses \"marked,\" a Markdown library of typeset and other formatting instructions, like the hashtags in the examples above.\n\nMarkdown is a markup language that generates HTML, which the cell can interpret and render. This means that Markdown Cells can also render plain HTML code. If you're interested in learning HTML, check out this [helpful online tutorial][html tutorial].\n\n[html tutorial]: http://www.w3schools.com/html/ \"w3schools.com HTML Tutorial\"", "_____no_output_____" ], [ "**Why Use Markdown (and not a WYSIWYG)?**", "_____no_output_____" ], [ "Why is Markdown better? Well, it’s worth saying that maybe it isn't. Mainly, it’s not actually a question of better or worse, but of what’s in front of you and of who you are. A definitive answer depends on the user and on that user’s goals and experience. These Notebooks don't use Markdown because it's definitely better, but rather because it's different and thus encourages users to think about their work differently. \n\nIt is very important for computer science students to learn how to conceptualize input and output as dependent, but also distinct. One good reason to use Markdown is that it encourages this kind of thinking. Relatedly, it might also promote focus on substance over surface aesthetic. Markdown is somewhat limited in its style options, which means that there are inherently fewer non-subject-specific concerns to agonize over while working. It is the conceit of this philosophy that you would, by using Markdown and this Notebook, begin to think of the specific stylistic rendering of your cells as distinct from what you type into those same cells, and thus also think of the content of your writing as necessarily separate from its formating and appearance. ", "_____no_output_____" ], [ "## 4.1 Block Elements", "_____no_output_____" ], [ "### 4.1.1 Paragraph Breaks", "_____no_output_____" ], [ "Paragraphs consist of one or more consecutive lines of text and they are separated by one or more blank lines. If a line contains only spaces, it is a blank line.", "_____no_output_____" ], [ "### 4.1.2 Line Breaks", "_____no_output_____" ], [ "#### 4.1.2.1 Hard-Wrapping and Soft-Wrapping", "_____no_output_____" ], [ "If you're used to word processing software, you've been writing with automatically hard-wrapped lines and paragraphs. In a hard-wrapped paragraph the line breaks are not dependent on the size of the viewing window. 
If you click and drag your mouse to expand a word processing document, for example, the shape of the paragraphs and the length of the lines will not change. In other words, the length of a hard-wrapped line is determined either by the number of words in the line (in the case of word processing software where this number is predetermined and the program wraps for the user automatically), or individual intention (when a user manually presses an Enter or Return key to control exactly how long a line is).\n\nSoft-wrapped paragraphs and lines, however, *do* depend on the size of their viewing window. If you increase the size of a window where soft-wrapped paragraphs are displayed, they too will expand into longer lines, becoming shorter and wider to fill the increased window space horizontally. Unsurprising, then, if you *narrow* a window, soft-wrapped lines will shrink and the paragraphs will become longer vertically. \n\nMarkdown, unlike most word processing software, does not automatically hard-wrap. If you want your paragraphs to have a particular or deliberate shape and size, you must insert your own break by ending the line with two spaces and then typing Return.\n", "_____no_output_____" ], [ "#### 4.1.2.2 Soft-Wrapping", "_____no_output_____" ], [ "<tt>\nblah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah\n</tt>", "_____no_output_____" ], [ "blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah", "_____no_output_____" ], [ "#### 4.1.2.3 Hard-Wrapping", "_____no_output_____" ], [ "<tt>\nblah blah blah blah blah \nblah blah blah blah blah blah blah blah blah \nblah blah blah blah blah blah blah blah blah blah blah \nblah blah blah blah blah \nblah blah blah blah blah \nblah blah blah blah blah blah blah blah blah blah blah blah blah \n</tt>", "_____no_output_____" ], [ "blah blah blah blah blah \nblah blah blah blah blah blah blah blah blah \nblah blah blah blah blah blah blah blah blah blah blah \nblah blah blah blah blah \nblah blah blah blah blah \nblah blah blah blah blah blah blah blah blah blah blah blah blah \n", "_____no_output_____" ], [ "### 4.1.3 Headers", "_____no_output_____" ], [ "<pre>\n#Header 1\n</pre>", "_____no_output_____" ], [ "#Header 1", "_____no_output_____" ], [ "<pre>\n##Header 2\n</pre>", "_____no_output_____" ], [ "##Header 2", "_____no_output_____" ], [ "<pre>\n###Header 3\n</pre>", "_____no_output_____" ], [ "###Header 3", "_____no_output_____" ], [ "<pre>\n####Header 4\n</pre>", "_____no_output_____" ], [ "####Header 4", "_____no_output_____" ], [ "<pre>\n#####Header 5\n</pre>", "_____no_output_____" ], [ "#####Header 5", "_____no_output_____" ], [ "<pre>\n######Header 6\n</pre>", "_____no_output_____" ], [ "######Header 6", "_____no_output_____" ], [ "### 4.1.4 Block Quotes", "_____no_output_____" ], [ "#### 4.1.4.1 Standard Block Quoting", "_____no_output_____" ], [ "<tt>\n>blah blah block quote blah blah block quote blah blah block \nquote blah blah block quote blah blah block \nquote blah blah block quote blah blah block quote blah blah block quote\n</tt>", "_____no_output_____" ], [ ">blah blah block quote blah blah block quote blah blah block \nquote blah blah block quote blah blah block \nquote blah blah block 
quote blah blah block quote blah blah block quote", "_____no_output_____" ], [ "**Note**: Block quotes work best if you intentionally hard-wrap the lines.", "_____no_output_____" ], [ "#### 4.1.4.2 Nested Block Quoting", "_____no_output_____" ], [ "<pre>\n>blah blah block quote blah blah block quote blah blah block \nblock quote blah blah block block quote blah blah block \n>>quote blah blah block quote blah blah \nblock block quote blah blah block \n>>>quote blah blah block quote blah blah block quote blah blah block quote\n</pre>", "_____no_output_____" ], [ ">blah blah block quote blah blah block quote blah blah block \nblock quote blah blah block block quote blah blah block \n>>quote blah blah block quote blah blah \nblock block quote blah blah block \n>>>quote blah blah block quote blah blah block quote blah blah block quote", "_____no_output_____" ], [ "### 4.1.5 Lists", "_____no_output_____" ], [ "#### 4.1.5.1 Ordered Lists", "_____no_output_____" ], [ "In Markdown, you can list items using numbers, a **`+`**, a **` - `**, or a **`*`**. However, if the first item in a list or sublist is numbered, Markdown will interpret the entire list as ordered and will automatically number the items linearly, no matter what character you use to denote any given separate item.", "_____no_output_____" ], [ "<pre>\n####Groceries:\n\n0. Fruit:\n 6. Pears\n 0. Peaches\n 3. Plums\n 4. Apples \n 2. Granny Smith \n 7. Gala\n * Oranges\n - Berries \n 8. Strawberries \n + Blueberries\n * Raspberries\n - Bananas\n9. Bread:\n 9. Whole Wheat\n 0. With oats on crust\n 0. Without oats on crust\n 0. Rye \n 0. White\n0. Dairy:\n 0. Milk\n 0. Whole\n 0. Skim\n 0. Cheese\n 0. Wisconsin Cheddar\n 0. Pepper Jack\n</pre> ", "_____no_output_____" ], [ "####Groceries:\n\n0. Fruit:\n 6. Pears\n 0. Peaches\n 3. Plums\n 4. Apples \n 2. Granny Smith \n 7. Gala\n * Oranges\n - Berries \n 8. Strawberries \n + Blueberries\n * Raspberries\n - Bananas\n9. Bread:\n 9. Whole Wheat\n 0. With oats on crust\n 0. Without oats on crust\n 0. Rye \n 0. White\n0. Dairy:\n 0. Milk\n 0. Whole\n 0. Skim\n 0. Cheese\n 0. Wisconsin Cheddar\n 0. Pepper Jack", "_____no_output_____" ], [ "#### 4.1.5.2 Bulleted Lists", "_____no_output_____" ], [ "If you begin your list or sublist with a **`+`**, a **` - `**, or a **`*`**, then Markdown will interpret the whole list as unordered and will use bullets regardless of the characters you type before any individual list item.", "_____no_output_____" ], [ "<pre>\n####Groceries:\n\n* Fruit:\n * Pears\n 0. Peaches\n 3. Plums\n 4. Apples \n - Granny Smith \n 7. Gala\n * Oranges\n - Berries \n - Strawberries \n + Blueberries\n * Raspberries\n - Bananas\n9. Bread:\n * Whole Wheat\n * With oats on crust\n 0. Without oats on crust\n + Rye \n 0. White\n0. Dairy:\n * Milk\n + Whole\n 0. Skim\n - Cheese\n - Wisconsin Cheddar\n 0. Pepper Jack\n </pre>", "_____no_output_____" ], [ "####Groceries:\n\n* Fruit:\n * Pears\n 0. Peaches\n 3. Plums\n 4. Apples \n - Granny Smith \n 7. Gala\n * Oranges\n - Berries \n - Strawberries \n + Blueberries\n * Raspberries\n - Bananas\n9. Bread:\n * Whole Wheat\n * With oats on crust\n 0. Without oats on crust\n + Rye \n 0. White\n0. Dairy:\n * Milk\n + Whole\n 0. Skim\n - Cheese\n - Wisconsin Cheddar\n 0. 
Pepper Jack", "_____no_output_____" ], [ "### 4.1.6 Section Breaks", "_____no_output_____" ], [ "<pre>\n___\n</pre>", "_____no_output_____" ], [ "___ ", "_____no_output_____" ], [ "<pre>\n***\n</pre>", "_____no_output_____" ], [ "***", "_____no_output_____" ], [ "<pre>------</pre>", "_____no_output_____" ], [ "------", "_____no_output_____" ], [ "<pre>\n* * *\n</pre>", "_____no_output_____" ], [ "* * *", "_____no_output_____" ], [ "<pre>\n_ _ _\n</pre>", "_____no_output_____" ], [ "_ _ _", "_____no_output_____" ], [ "<pre> \n- - -\n</pre>", "_____no_output_____" ], [ "- - -", "_____no_output_____" ], [ "## 4.2 Backslash Escape", "_____no_output_____" ], [ "What happens if you want to include a literal character, like a **`#`**, that usually has a specific function in Markdown? Backslash Escape is a function that prevents Markdown from interpreting a character as an instruction, rather than as the character itself. It works like this:", "_____no_output_____" ], [ "<pre>\n\\# Wow, this isn't a header. \n# This is definitely a header.\n</pre>", "_____no_output_____" ], [ "\\# Wow, this isn't a header. \n# This is definitely a header.", "_____no_output_____" ], [ "Markdown allows you to use a backslash to escape from the functions of the following characters:\n* \\ backslash\n* ` backtick\n* \\* asterisk\n* _ underscore\n* {} curly braces\n* [] square brackets\n* () parentheses\n* \\# hashtag\n* \\+ plus sign|\n* \\- minus sign (hyphen)\n* . dot\n* ! exclamation mark", "_____no_output_____" ], [ "## 4.3 Hyperlinks", "_____no_output_____" ], [ "### 4.3.1 Automatic Links", "_____no_output_____" ], [ "<pre>\nhttp://en.wikipedia.org\n</pre>", "_____no_output_____" ], [ "http://en.wikipedia.org", "_____no_output_____" ], [ "### 4.3.2 Standard Links", "_____no_output_____" ], [ "<pre>\n[click this link](http://en.wikipedia.org)\n</pre>", "_____no_output_____" ], [ "[click this link](http://en.wikipedia.org)", "_____no_output_____" ], [ "### 4.3.3 Standard Links With Mouse-Over Titles", "_____no_output_____" ], [ "<pre> \n[click this link](http://en.wikipedia.org \"Wikipedia\")\n</pre>", "_____no_output_____" ], [ "[click this link](http://en.wikipedia.org \"Wikipedia\")", "_____no_output_____" ], [ "### 4.3.4 Reference Links", "_____no_output_____" ], [ "Suppose you are writing a document in which you intend to include many links. The format above is a little arduous and if you have to do it repeatedly *while* you're trying to focus on the content of what you're writing, it's going to be a really big pain. \n\nFortunately, there is an alternative way to insert hyperlinks into your text, one where you indicate that there is a link, name that link, and then use the name to provide the actually URL later on when you're less in the writing zone. This method can be thought of as a \"reference-style\" link because it is similar to using in-text citations and then defining those citations later in a more detailed reference section or bibliography. \n", "_____no_output_____" ], [ "<pre>\nThis is [a reference] [identification tag for link]\n\n[identification tag for link]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"\n</pre>", "_____no_output_____" ], [ "This is [a reference] [identification tag for link]\n\n[identification tag for link]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"", "_____no_output_____" ], [ "**Note:** The \"identification tag for link\" can be anything. 
For example:", "_____no_output_____" ], [ "<pre>\nThis is [a reference] [lfskdhflhslgfh333676]\n\n[lfskdhflhslgfh333676]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"\n</pre>", "_____no_output_____" ], [ "This is [a reference] [lfskdhflhslgfh333676]\n\n[lfskdhflhslgfh333676]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"", "_____no_output_____" ], [ "This means you can give your link an intuitive, easy to remember, and relevant ID:", "_____no_output_____" ], [ "<pre>\nThis is [a reference][Chile]\n\n[chile]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"\n</pre>", "_____no_output_____" ], [ "This is [a reference][Chile]\n\n[chile]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"", "_____no_output_____" ], [ "**Note**: Link IDs are not case-sensitive.", "_____no_output_____" ], [ "If you don't want to give your link an ID, you don't have to. As a short cut, Markdown will understand if you just use the words in the first set of brackets to define the link later on. This works in the following way:", "_____no_output_____" ], [ "<pre>\nThis is [a reference][]\n\n[a reference]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"\n</pre>", "_____no_output_____" ], [ "This is [a reference][]\n\n[a reference]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"", "_____no_output_____" ], [ "Another really helpful feature of a reference-style link is that you can define the link anywhere in the cell. (must be in the cell) For example:\n", "_____no_output_____" ], [ "<tt>\nThis is [a reference] [ref] blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah\nblah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah <br/><br/>\n\n[ref]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About Chile\"\n</tt>", "_____no_output_____" ], [ "This is [a reference] [ref] blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah\nblah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah\n\n[ref]: http://en.wikipedia.org/wiki/Chile \"Wikipedia Article About 
Chile\"", "_____no_output_____" ], [ "**Note:** Providing a mouse-over title for any link, regardless of whether it is a standard or reference-stlye type, is optional. With reference-style links, you can include the mouse-over title by placing it in quotes, single quotes, or parentheses. For standard links, you can only define a mouse-over title in quotes.\n\n", "_____no_output_____" ], [ "### 4.3.5 Notebook-Internal Links", "_____no_output_____" ], [ "When you create a Header you also create a discrete location within your Notebook. This means that, just like you can link to a specific location on the web, you can also link to a Header Cell inside your Notebook. Internal links have very similar Markdown formatting to regular links. The only difference is that the name of the link, which is the URL in the case of external links, is just a hashtag plus the name of the Header Cell you are linking to (case-sensitive) with dashes in between every word. If you hover your mouse over a Header Cell, a blue Greek pi letter will appear next to your title. If you click on it, the URL at the top of your window will change and the internal link to that section will appear last in the address. You can copy and paste it in order to make an internal link inside a Markdown Cell. ", "_____no_output_____" ], [ "#### 4.3.5.1 Standard Notebook-Internal Links Without Mouse-Over Titles", "_____no_output_____" ], [ "<pre>\n&#91;Here's a link to the section of Automatic Section Numbering](&#35;Automatic-Section-Numbering)\n</pre>", "_____no_output_____" ], [ "[Here's a link to the section of Automatic Section Numbering](#2.4.2.1-Automatic-Section-Numbering)", "_____no_output_____" ], [ "#### 4.3.5.2 Standard Notebook-Internal Links With Mouse-Over Titles", "_____no_output_____" ], [ "<pre>\n&#91;Here's a link to the section on lists](#Lists \"Lists\")\n</pre>", "_____no_output_____" ], [ "[Here's a link to the section of Automatic Section Numbering](#2.4.2.1-Automatic-Section-Numbering)", "_____no_output_____" ], [ "#### 4.3.5.3 Reference-Style Notebook-Internal Links", "_____no_output_____" ], [ "<pre>\n[Here's a link to the section on Table of Contents Support][TOC]\n\n&#91;TOC]: #Table-of-Contents-Support\n</pre>", "_____no_output_____" ], [ "[Here's a link to the section on Table of Contents Support][TOC]\n\n[TOC]: #2.4.2.2-Table-of-Contents-Support", "_____no_output_____" ], [ "## 4.4 Tables", "_____no_output_____" ], [ "In Markdown, you can make a table by using vertical bars and dashes to define the cell and header borders:", "_____no_output_____" ], [ "<pre>\n|Header|Header|Header|Header|\n|------|------|------|------|\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n</pre>", "_____no_output_____" ], [ "|Header|Header|Header|Header|\n|------|------|------|------|\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n|Cell |Cell |Cell | Cell |\n", "_____no_output_____" ], [ "Making a table this way might be especially useful if you want your document to be legible both rendered and unrendered. However, you don't *need* to include all of those dashes, vertical bars, and spaces for Markdown to understand that you're making a table. 
Here's the bare minimum you would need to create the table above: ", "_____no_output_____" ], [ "<pre>\nHeader|Header|Header|Header\n-|-|-|-\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\n</pre>", "_____no_output_____" ], [ "Header|Header|Header|Header\n-|-|-|-\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\nCell|Cell|Cell|Cell\n", "_____no_output_____" ], [ "It's important to note that the second line of dashes and vertical bars is essential. If you have just the line of headers and the second line of dashes and vertical bars, that's enough for Markdown to make a table. \n\nAnother important formatting issue has to do with the vertical bars that define the left and right edges of the table. If you include all the vertical bars on the far left and right of the table, like in the first example above, Markdown will ignore them completely. *But*, if you leave out some and include others, Markdown will interpret any extra vertical bar as an additional cell on the side that the bar appears in the unrendered version of the text. This also means that if you include the far left or right vertical bar in the second line of bars and dashes, you must include all of the otherwise optional vertical bars (like in the first example above).", "_____no_output_____" ], [ "### 4.4.1 Cell Justification", "_____no_output_____" ], [ "If not otherwise specified the text in each header and cell of a table will justify to the left. If, however, you wish to specify either right justification or centering, you may do so like this: ", "_____no_output_____" ], [ "<tt>\n**Centered, Right-Justified, and Regular Cells and Headers**:\n\ncentered header | regular header | right-justified header | centered header | regular header \n:-:|-|-:|:-:|-\ncentered cell|regular cell|right-justified cell|centered cell|regular cell\ncentered cell|regular cell|right-justified cell|centered cell|regular cell\n</tt>", "_____no_output_____" ], [ "**Centered, Right-Justified, and Regular Cells and Headers**:\n\ncentered header | regular header | right-justified header | centered header | regular header \n:-:|-|-:|:-:|-\ncentered cell|regular cell|right-justified cell|centered cell|regular cell\ncentered cell|regular cell|right-justified cell|centered cell|regular cell\n", "_____no_output_____" ], [ "While it is difficult to see that the headers are differently justified from one another, this is just because the longest line of characters in any column defines the width of the headers and cells in that column. ", "_____no_output_____" ], [ "**Note:** You cannot make tables directly beneath a line of text. You must put a blank line between the end of a paragraph and the beginning of a table. 
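If you ever need to build a table from data instead of typing it out, the bare-minimum syntax above is easy to generate programmatically. The following Python sketch is only an illustration for this guide (the function name and sample data are invented); it produces a string you can paste into a Markdown Cell:

```python
def markdown_table(headers, rows):
    """Build a minimal Markdown table: a header row, the essential second
    row of dashes, then one pipe-separated line per data row."""
    lines = ["|".join(headers)]
    # Use ":-:" or "-:" instead of "-" here to center or right-justify a column (see 4.4.1).
    lines.append("|".join("-" for _ in headers))
    for row in rows:
        lines.append("|".join(str(cell) for cell in row))
    return "\n".join(lines)

print(markdown_table(["Header", "Header", "Header"],
                     [["Cell", "Cell", "Cell"],
                      ["Cell", "Cell", "Cell"]]))
```

Remember the note above: leave a blank line between any preceding paragraph and the pasted table.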
", "_____no_output_____" ], [ "## 4.5 Style and Emphasis", "_____no_output_____" ], [ "<pre>\n*Italics*\n</pre>", "_____no_output_____" ], [ "*Italics*", "_____no_output_____" ], [ "<pre>\n_Italics_\n</pre>", "_____no_output_____" ], [ "_Italics_", "_____no_output_____" ], [ "<pre>\n**Bold**\n</pre>", "_____no_output_____" ], [ "**Bold**", "_____no_output_____" ], [ "<pre>\n__Bold__\n</pre>", "_____no_output_____" ], [ "__Bold__", "_____no_output_____" ], [ "**Note:** If you want actual asterisks or underscores to appear in your text, you can use the [backslash escape function] [backslash] like this:\n\n[backslash]: #4.2-Backslash-Escape \"Backslash Escape\"", "_____no_output_____" ], [ "<pre>\n\\*awesome asterisks\\* and \\_incredible under scores\\_\n</pre>", "_____no_output_____" ], [ "\\*awesome asterisks\\* and \\_incredible under scores\\_", "_____no_output_____" ], [ "## 4.6 Other Characters", "_____no_output_____" ], [ "<pre> \nAmpersand &amp;amp; Ampersand\n</pre>", "_____no_output_____" ], [ "Ampersand &amp; Ampersand", "_____no_output_____" ], [ "<pre>\n&amp;lt; angle brackets &amp;gt;\n</pre>", "_____no_output_____" ], [ "&lt; angle brackets &gt;", "_____no_output_____" ], [ "<pre>\n&amp;quot; quotes &amp;quot; ", "_____no_output_____" ], [ "&quot; quotes &quot; ", "_____no_output_____" ], [ "## 4.7 Including Code Examples", "_____no_output_____" ], [ "If you want to signify that a particular section of text is actually an example of code, you can use backquotes to surround the code example. These will switch the font to monospace, which creates a clear visual formatting difference between the text that is meant to be code and the text that isn't. \n\nCode can either in the middle of a paragraph, or as a block. Use a single backquote to start and stop code in the middle of a paragraph. Here's an example:", "_____no_output_____" ], [ "<pre>\nThe word `monospace` will appear in a code-like form.\n</pre>", "_____no_output_____" ], [ "The word `monospace` will appear in a code-like form.", "_____no_output_____" ], [ "**Note:** If you want to include a literal backquote in your code example you must suround the whole text block in double backquotes like this: ", "_____no_output_____" ], [ "<pre>\n`` Look at this literal backquote ` ``\n</pre>", "_____no_output_____" ], [ "`` Look at this literal backquote ` ``", "_____no_output_____" ], [ "To include a complete code-block inside a Markdown cell, use triple backquotes. Optionally, you can put the name of the language that you are quoting after the starting triple backquotes, like this:", "_____no_output_____" ], [ "<pre>\n```python\ndef function(n):\n return n + 1\n```\n</pre>", "_____no_output_____" ], [ "That will format the code-block (sometimes called \"fenced code\") with syntax coloring. 
The above code block will be rendered like this:", "_____no_output_____" ], [ "```python\ndef function(n):\n return n + 1\n```", "_____no_output_____" ], [ "The language formatting names that you can currently use after the triple backquote are:", "_____no_output_____" ], [ "<pre>\napl django go jinja2 ntriples q smalltalk toml\nasterisk dtd groovy julia octave r smarty turtle\nclike dylan haml less pascal rpm smartymixed vb\nclojure ecl haskell livescript pegjs rst solr vbscript\ncobol eiffel haxe lua perl ruby sparql velocity\ncoffeescript erlang htmlembedded markdown php rust sql verilog\ncommonlisp fortran htmlmixed pig sass stex xml\ncss gas http mirc properties scheme tcl xquery\nd gfm jade mllike puppet shell tiddlywiki yaml\ndiff gherkin javascript nginx python sieve tiki z80\n</pre>", "_____no_output_____" ], [ "## 4.8 Images", "_____no_output_____" ], [ "### 4.8.1 Images from the Internet", "_____no_output_____" ], [ "Inserting an image from the internet is almost identical to inserting a link. You just also type a **`!`** before the first set of brackets:", "_____no_output_____" ], [ "<pre>\n![It doesn't matter what you write here](http://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/South_African_Giraffe,_head.jpg/877px-South_African_Giraffe,_head.jpg \"Picture of a Giraffe)\n</pre>", "_____no_output_____" ], [ "![It doesn't matter what I write here](http://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/South_African_Giraffe,_head.jpg/877px-South_African_Giraffe,_head.jpg \"Picture of a Giraffe\")", "_____no_output_____" ], [ "**Note:** Unlike with a link, the words that you type in the first set of brackets do not appear when they are rendered into html by Markdown. ", "_____no_output_____" ], [ "#### 4.8.1.1 Reference-Style Images from the Internet", "_____no_output_____" ], [ "Just like with links, you can also use a reference-style format when inserting images from the internet. This involves indicating where you want to place a picture, giving that picture an ID tag, and then later defining that ID tag. The process is nearly identical to using the reference-style format to insert a link:", "_____no_output_____" ], [ "<pre>\n![][giraffe]\n\n[giraffe]:http://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/South_African_Giraffe,_head.jpg/877px-South_African_Giraffe,_head.jpg \"Picture of a Giraffe\"\n</pre>", "_____no_output_____" ], [ "![][giraffe]\n\n[giraffe]: http://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/South_African_Giraffe,_head.jpg/877px-South_African_Giraffe,_head.jpg \"Picture of a Giraffe\"", "_____no_output_____" ], [ "## 4.9 LaTeX Math", "_____no_output_____" ], [ "Jupyter Notebooks' Markdown cells support LateX for formatting mathematical equations. 
To tell Markdown to interpret your text as LaTeX, surround your input with dollar signs like this:", "_____no_output_____" ], [ "<pre>\n$z=\\dfrac{2x}{3y}$\n</pre>", "_____no_output_____" ], [ "$z=\\dfrac{2x}{3y}$", "_____no_output_____" ], [ "An equation can be very complex:\n\n<pre>\n$F(k) = \\int_{-\\infty}^{\\infty} f(x) e^{2\\pi i k} dx$\n</pre>", "_____no_output_____" ], [ "$F(k) = \\int_{-\\infty}^{\\infty} f(x) e^{2\\pi i k} dx$", "_____no_output_____" ], [ "If you want your LaTeX equations to be indented towards the center of the cell, surround your input with two dollar signs on each side like this: ", "_____no_output_____" ], [ "<pre>\n$$2x+3y=z$$\n</pre>", "_____no_output_____" ], [ "$$2x+3y=z$$", "_____no_output_____" ], [ "For a comprehensive guide to the mathematical symbols and notations supported by Jupyter Notebooks' Markdown cells, check out [Martin Keefe's helpful reference materials on the subject][mkeefe].\n\n[mkeefe]: http://martinkeefe.com/math/mathjax1 \"Martin Keefe's MathJax Guide\"", "_____no_output_____" ], [ "# 5. Bibliographic Support", "_____no_output_____" ], [ "Bibliographic Support makes managing references and citations in your Notebook much easier, by automating some of the bibliographic process every person goes through when doing research or writing in an academic context. There are essentially three steps to this process for which your Notebook's Bibliographic support can assist: gathering and organizing sources you intend to use, citing those sources within the text you are writing, and compiling all of the material you referenced in an organized, correctly formatted list, the kind which usually appears at the end of a paper in a section titled \"References,\" \"Bibliography,\" or \"Works Cited.\"\n\nIn order to benefit from this functionality, you need to do two things while writing your paper: first, you need to create a [Bibtex database][bibdb] of information about your sources and second, you must use the [cite command][cc] in your Markdown writing cells to indicate where you want in-text citations to appear.\n\nIf you do both these things, the \"Generate References\" button will be able to do its job by replacing all of your cite commands with validly formatted in-text citations and creating a References section at the end of your document, which will only ever include the works you specifically cited within your Notebook. \n\n**Note:** References are generated without a header cell, just a [markdown header][]. This means that if you want a References section to appear in your table of contents, you will have to unrender the References cell, delete the \"References\" header, make a Header Cell of the appropriate level and title it \"References\" yourself, and then generate a table of contents using [Table of Contents Support][table of contents]. This way, you can also title your References section \"Bibliography\" or \"Works Cited,\" if you want.\n\n[markdown header]: #4.1.3-Headers\n[table of contents]: #2.4.2.2-Table-of-Contents-Support\n[bibdb]: #5.1-Creating-a-Bibtex-Database\n[cc]:#5.2-Cite-Commands-and-Citation-IDs\n\n\n", "_____no_output_____" ], [ "## 5.1 Creating a Bibtex Database", "_____no_output_____" ], [ "Bibtex is reference management software for formatting lists of references ([from Wikipedia](http://en.wikipedia.org/wiki/BibTeX \"Wikipedia Article On Bibtex\")). 
While your Notebook does not use the Bibtex software, it does use [Bibtex formatting](#5.1.3-Formatting-Bibtex-Entries) for creating references within your Bibliographic database.\n\nIn order for the Generate References button to work, you need a bibliographic database for it to search and match up with the sources you've indicated you want to credit using [cite commands and citation IDs](#5.2-Cite-Commands-and-Citation-IDs).\n\nWhen creating a bibliographic database for your Notebook, you have two options: you can make an external database, which will exist in a separate Notebook from the one you are writing in, or you can make an internal database which will exist in a single cell inside the Notebook in which you are writing. Below are explanations of how to use these database creation strategies, as well as a discussion of the pros and cons for each. ", "_____no_output_____" ], [ "### 5.1.1 External Bibliographic Databases", "_____no_output_____" ], [ "To create an external bibliographic database, you will need to create a new Notebook and title it **`Bibliography`** in the toplevel folder of your current Jupyter session. As long as you do not also have an internal bibliographic database, when you click the Generate References button your Notebook's Bibliographic Support will search this other **`Bibliography`** Notebook for Bibtex entries. Bibtex entries can be in any cell and in any kind of cell in your **`Bibliography`** Notebook as long as the cell begins with **`<!--bibtex`** and ends with **`-->`**. Go to [this section][bibfor] for examples of valid BibTex formatting.\n\nNot every cell has to contain BibTex entries for the external bibliographic database to work as intended with your Notebook's bibliographic support. This means you can use the same helpful organization features that you use in other Notebooks, like [Automatic Section Numbering][asn] and [Table of Contents Support][toc], to structure your own little library of references. The best part of this is that any Notebook containing validly formatted [cite commands][cc] can check your external database and find only the items that you have indicated you want to cite. So you only ever have to make the entry once and your external database can grow large and comprehensive over the course of your accademic writing career. \n\nThere are several advantages to using an external database over [an internal one][internal database]. The biggest one, which has already been described, is that you will only ever need to create one and you can organize it into sections by using headers and generating [automatic section numbers][asn] and a [table of contents][toc]. These tools will help you to easily find the right [citation ID][cc] for a given source you want to cite. The other major advantage is that an external database is not visible when viewing the Notebook in which you are citing sources and generating a References list. Bibtex databases are not very attractive or readable and you probably won't want one to show up in your finished document. There are [ways to hide internal databases][hiding bibtex cell], but it's convenient not to have to worry about that. 
\n\n\n[asn]: #2.4.2.1-Automatic-Section-Numbering\n[toc]: #2.4.2.2-Table-of-Contents-Support\n[cc]: #5.2-Cite-Commands-and-Citation-IDs\n[hiding bibtex cell]: #5.1.2.1-Hiding-Your-Internal-Database\n[internal database]: #5.1.2-Internal-Bibliographic-Databases\n[bibfor]:#5.1.3-Formatting-Bibtex-Entries", "_____no_output_____" ], [ "### 5.1.2 Internal Bibliographic Databases", "_____no_output_____" ], [ "Unlike [external bibliographic databases][exd], which are made up of an entire separate notebook, internal bibliographic databases consist of only one cell within the Notebook in which you are citing sources and compiling a References list. The single cell, like all of the many BibTex cells that can make up an external database, must begin with **`<!--bibtex`** and end with **`-->`** in order to be validly formatted and correctly interpreted by your Notebook's Bibliographic Support. It's probably best to keep this cell at the very end or the very beginning of your Notebook so you always know where it is. This is because when you use an internal bibliographic database it can only consist of one cell. This means that if you want to cite multiple sources you will need to keep track of the single cell that comprises your entire internal bibliographic database during every step of the research and writing process. \n\nInternal bibliographic databases make more sense when your project is a small one and the list of total sources is short. This is especially convenient if you don't already have a built-up external database. With an internal database you don't have to create and organize a whole separate Notebook, a task that's only useful when you have to keep track of a lot of different material. Additionally, if you want to share your finished Notebook with others in a form that retains its structural validity, you only have to send one Notebook, as opposed to both the project itself and the Notebook that comprises your external bibliographic database. This is especially useful for a group project, where you want to give another reader the ability to edit, not simply read, your References section. \n\n[exd]:#5.1.1-External-Bibliographic-Databases\n", "_____no_output_____" ], [ "#### 5.1.2.1 Hiding Your Internal Database", "_____no_output_____" ], [ "Even though they have some advantages, especially for smaller projects, internal databases have one major drawback. They are not very attractive or polished looking and you probably won't want one to appear in your final product. Fortunately, there are two methods for hiding your internal bibliographic database.\n\nWhile your Notebook's bibliographic support will be able to interpret [correctly formatted BibTex entries][bibfor] in any [kind of cell][cell kind], if you use a [Markdown Cell][md cell] to store your internal bibliographic database, then when you run the cell all of the ugly BibTex formatting will disappear. This is handy, but it also makes the cell very difficult to find, so remember to keep careful track of where your hidden BibTex database is if you're planning to edit it later. If you want your final product to be viewed stably as HTML, then you can make your internal BibTex database inside a [Raw Cell][RC], use the [cell toolbar][] to select \"Raw Cell Format\", and then select \"None\" in the toolbar that appears in the corner of your Raw Cell BibTex database. This way, you will still be able to easily find and edit the database when you are working on your Notebook, but others won't be able to see the database when viewing your project in its final form. 
\n\n\n[cell toolbar]: #1.-Getting-to-Know-your-Jupyter-Notebook's-Toolbar\n[bibfor]:#5.1.3-Formatting-Bibtex-Entries\n[RC]:#2.3-Raw-Cells\n[md cell]: #2.2-Markdown-Cells\n[cell kind]: #2.-Different-Kinds-of-Cells", "_____no_output_____" ], [ "### 5.1.3 Formatting Bibtex Entries", "_____no_output_____" ], [ "BibTex entries consist of three crucial components: one, the type of source you are citing (a book, article, website, etc.); two, the unique [citation ID][cc] you wish to remember the source by; and three, the fields of information about that source (author, title of work, date of publication, etc.). Below is an example entry, with each of these three components designated clearly:\n\n<pre>\n\n&lt;!--bibtex\n\n@ENTRY TYPE{CITATION ID,\n    FIELD 1 = {source specific information},\n    FIELD 2 = {source specific information},\n    FIELD 3 = {source specific information},\n    FIELD 4 = {source specific information}\n}\n\n-->\n\n</pre>\n\nMore comprehensive documentation of what entry types and corresponding sets of required and optional fields BibTex supports can be found in the [Wikipedia article on BibTex][wikibibt].\n\nBelow is a section of the external bibliographic database for a fake history paper about the fictional island nation of Calico. (None of the entries contain information about real books or articles):\n\n[cc]: #5.2-Cite-Commands-and-Citation-IDs\n[wikibibt]: http://en.wikipedia.org/wiki/BibTeX\n\n\n", "_____no_output_____" ], [ "<pre>\n\n&lt;!--bibtex\n\n@book{wellfarecut,\n    title = {Our Greatest Threat: The Rise of Anti-Wellfare Politics in Calico in the 21st Century},\n    author = {Jacob, Bernadette},\n    year = {2010},\n    publisher = {Jupyter University Press}\n}\n    \n@article{militaryex2,\n    title = {Rethinking Calican Military Expansion for the New Century},\n    author = {Collier, Brian F.},\n    journal = {Modern Politics},\n    volume = {60},\n    issue = {25},\n    pages = {35 - 70},\n    year = {2012}  \n}\n\n@article{militaryex1,\n    title = {Conservative Majority Passes Budget to Grow Military},\n    author = {Lane, Lois},\n    journal = {The Daily Calican},\n    month = {October 19th, 2011},\n    pages = {15 - 17},\n    year = {2011}\n}\n\n@article{oildrill,\n    title = {Oil Drilling Off the Coast of Jupyter Approved for Early Next Year},\n    author = {Marks, Meghan L.},\n    journal = {The Python Gazette},\n    month = {December 5th, 2012},\n    pages = {8 - 9},\n    year = {2012}\n}\n\n@article{rieseinterview,\n    title = {Interview with Up and Coming Freshman Senator, Alec Riese of Python},\n    author = {Wilmington, Oliver},\n    journal = {The Jupyter Times},\n    month = {November 24th, 2012},\n    pages = {4 - 7},\n    year = {2012}\n}\n\n@book{calicoww2:1,\n    title = {Calico and WWII: Untold History},\n    author = {French, Viola},\n    year = {1997},\n    publisher = {Calicia City Free Press}\n}\n\n@book{calicoww2:2,\n    title = {Rebuilding Calico After Japanese Occupation},\n    author = {Kepps, Milo },\n    year = {2002},\n    publisher = {Python Books}\n}\n-->\n</pre>", "_____no_output_____" ], [ "## 5.2 Cite Commands and Citation IDs", "_____no_output_____" ], [ "When you want to cite a bibliographic entry from a database (either internal or external), you must know the citation ID, sometimes called the \"key\", for that entry. Citation IDs are strings of letters, numbers, and symbols that *you* make up, so they can be any word or combination of words you find easy to remember. Once you've given an entry a citation ID, however, you do need to use that same ID every time you cite that source, so it may behoove you to keep your database organized. 
This way it will be much easier to locate any given source's entry and its potentially forgotten citation ID. \n\nOnce you know the citation ID for a given entry, use the following format to indicate to your Notebook's bibliographic support that you'd like to insert an in-text citation:\n\n<pre>\n[](&#35;cite-CITATION ID)\n</pre>\n\nThis format is the cite command. For example, if you wanted to cite *Rebuilding Calico After Japanese Occupation* listed above, you would use the cite command and the specific citation ID for that source:\n\n<pre>\n[](&#35;cite-calicoww2:2)\n</pre>\n\nBefore clicking the \"Generate References\" button, your unrendered text might look like this:\n\n\n<pre>\nRebuilding Calico took many years &#91;](&#35;cite-calicoww2:2).\n</pre>\n\n\nAfter clicking the \"Generate References\" button, your unrendered text might look like this:\n\n\n<pre>\nRebuilding Calico took many years &lt;a name=\"ref-1\"/>&#91;(Kepps, 2002)](#cite-calicoww2:2).\n</pre>\n\n\nand then the text would render as:\n\n\n>Rebuilding Calico took many years <a name=\"ref-1\"/>[(Kepps, 2002)](#cite-calicoww2:2).\n\n\nIn addition, a cell would be added at the bottom with the following contents:\n\n\n>#References\n\n><a name=\"cite-calicoww2:2\"/><sup>[^](#ref-1) [^](#ref-2) </sup>Kepps, Milo . 2002. _Rebuilding Calico After Japanese Occupation_.\n\n", "_____no_output_____" ], [ "# 6. Turning Your Jupyter Notebook into a Slideshow", "_____no_output_____" ], [ "To install slideshow support for your Notebook, go [here](http://nbviewer.ipython.org/github/fperez/nb-slideshow-template/blob/master/install-support.ipynb).\n\nTo see a tutorial and example slideshow, go [here](http://www.damian.oquanta.info/posts/make-your-slides-with-ipython.html).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d05a7d08a3b519cb69906e84ad56c099e13cfcad
1,700
ipynb
Jupyter Notebook
Day_3_Assignment.ipynb
ratikeshbajpai/Letsupgrade-Python
3c00559574d3f5580d9789f3f4296a260d5cecf0
[ "Apache-2.0" ]
null
null
null
Day_3_Assignment.ipynb
ratikeshbajpai/Letsupgrade-Python
3c00559574d3f5580d9789f3f4296a260d5cecf0
[ "Apache-2.0" ]
null
null
null
Day_3_Assignment.ipynb
ratikeshbajpai/Letsupgrade-Python
3c00559574d3f5580d9789f3f4296a260d5cecf0
[ "Apache-2.0" ]
null
null
null
23.943662
248
0.464706
[ [ [ "<a href=\"https://colab.research.google.com/github/ratikeshbajpai/Letsupgrade-Python/blob/master/Day_3_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# **Assignment 1 Day 3**", "_____no_output_____" ] ], [ [ "# Read the aircraft's current altitude from the user\nn = int(input(\"Enter the altitude\"))\n\n# Decide what to do based on the altitude\nif n <= 1000:\n    print(\"Safe to land\")\nelif n <= 5000:\n    # Between 1000 and 5000: descend before attempting to land\n    print(\"Bring Down to 1000\")\nelse:\n    # Above 5000: abort the approach\n    print(\"Turn Around\")", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
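The altitude-check cell in the Day 3 assignment record above reads one value and prints the decision inline. As a usage illustration only (it is not part of the original notebook), here is a minimal sketch of the same thresholds wrapped in a reusable function; the name `landing_instruction` is a hypothetical choice:

```python
def landing_instruction(altitude):
    """Return the landing instruction for a given altitude, using the assignment's thresholds."""
    if altitude <= 1000:
        return "Safe to land"        # low enough to land right away
    elif altitude <= 5000:
        return "Bring Down to 1000"  # descend first, then land
    return "Turn Around"             # above 5000: abort the approach

# Example calls covering each branch
print(landing_instruction(800))    # Safe to land
print(landing_instruction(3000))   # Bring Down to 1000
print(landing_instruction(9000))   # Turn Around
```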
d05a984498feb86a9b97ad2aec019769957b59af
187,977
ipynb
Jupyter Notebook
Recommendations_with_IBM.ipynb
julie-data/recommendations-ibm-watson
3771be4041aa74009639e1bc42853761189aefcd
[ "IBM-pibs" ]
null
null
null
Recommendations_with_IBM.ipynb
julie-data/recommendations-ibm-watson
3771be4041aa74009639e1bc42853761189aefcd
[ "IBM-pibs" ]
null
null
null
Recommendations_with_IBM.ipynb
julie-data/recommendations-ibm-watson
3771be4041aa74009639e1bc42853761189aefcd
[ "IBM-pibs" ]
null
null
null
75.675121
41,656
0.743857
[ [ [ "# Recommendations with IBM\n\nIn this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. \n\n\nYou may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**\n\nBy following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations. \n\n\n## Table of Contents\n\nI. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>\nII. [Rank Based Recommendations](#Rank)<br>\nIII. [User-User Based Collaborative Filtering](#User-User)<br>\nIV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br>\nV. [Matrix Factorization](#Matrix-Fact)<br>\nVI. [Extras & Concluding](#conclusions)\n\nAt the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport project_tests as t\nimport pickle\nfrom matplotlib.pyplot import figure\n\n%matplotlib inline\n\ndf = pd.read_csv('data/user-item-interactions.csv')\ndf_content = pd.read_csv('data/articles_community.csv')\ndel df['Unnamed: 0']\ndel df_content['Unnamed: 0']\n\n# Show df to get an idea of the data\ndf.head()", "_____no_output_____" ], [ "# Show df_content to get an idea of the data\ndf_content.head()", "_____no_output_____" ] ], [ [ "### <a class=\"anchor\" id=\"Exploratory-Data-Analysis\">Part I : Exploratory Data Analysis</a>\n\nUse the dictionary and cells below to provide some insight into the descriptive statistics of the data.\n\n`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. ", "_____no_output_____" ] ], [ [ "# Count interactions per user, sorted\ninteractions = df.groupby('email').count().drop(['title'],axis=1)\ninteractions.columns = ['nb_articles']\ninteractions_sorted = interactions.sort_values(['nb_articles'])\ninteractions_sorted.head()", "_____no_output_____" ], [ "interactions_sorted.describe()", "_____no_output_____" ], [ "#plt.figure(figsize=(10,30))\nplt.style.use('ggplot')\n\ninteractions_plot = interactions_sorted.reset_index().groupby('nb_articles').count()\ninteractions_plot.plot.bar(figsize=(20,10))\n\nplt.title('Number of users per number of interactions')\nplt.xlabel('Interactions')\nplt.ylabel('Users')\nplt.legend(('Number of users',),prop={\"size\":10})\n\nplt.show()", "_____no_output_____" ], [ "# Fill in the median and maximum number of user_article interactios below\n\nmedian_val = 3 # 50% of individuals interact with ____ number of articles or fewer.\nmax_views_by_user = 364 # The maximum number of user-article interactions by any 1 user is ______.", "_____no_output_____" ] ], [ [ "`2.` Explore and remove duplicate articles from the **df_content** dataframe. 
", "_____no_output_____" ] ], [ [ "row_per_article = df_content.groupby('article_id').count()\nduplicates = row_per_article[row_per_article['doc_full_name'] > 1].index\ndf_content[df_content['article_id'].isin(duplicates)].sort_values('article_id')", "_____no_output_____" ], [ "# Remove any rows that have the same article_id - only keep the first\ndf_content_no_duplicates = df_content.drop_duplicates('article_id')\ndf_content_no_duplicates[df_content_no_duplicates['article_id'].isin(duplicates)].sort_values('article_id')", "_____no_output_____" ] ], [ [ "`3.` Use the cells below to find:\n\n**a.** The number of unique articles that have an interaction with a user. \n**b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>\n**c.** The number of unique users in the dataset. (excluding null values) <br>\n**d.** The number of user-article interactions in the dataset.", "_____no_output_____" ] ], [ [ "# Articles with an interaction\nlen(df['article_id'].unique())", "_____no_output_____" ], [ "# Total articles\nlen(df_content_no_duplicates['article_id'].unique())", "_____no_output_____" ], [ "# Unique users\nlen(df[df['email'].isnull() == False]['email'].unique())", "_____no_output_____" ], [ "# Unique interactions\nlen(df)", "_____no_output_____" ], [ "unique_articles = 714 # The number of unique articles that have at least one interaction\ntotal_articles = 1051 # The number of unique articles on the IBM platform\nunique_users = 5148 # The number of unique users\nuser_article_interactions = 45993 # The number of user-article interactions", "_____no_output_____" ] ], [ [ "`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. 
There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).", "_____no_output_____" ] ], [ [ "df.groupby('article_id').count().sort_values(by='email',ascending = False).head(1)", "_____no_output_____" ], [ "most_viewed_article_id = str(1429.0) # The most viewed article in the dataset as a string with one value following the decimal \nmax_views = 937 # The most viewed article in the dataset was viewed how many times?", "_____no_output_____" ], [ "## No need to change the code here - this will be helpful for later parts of the notebook\n# Run this cell to map the user email to a user_id column and remove the email column\n\ndef email_mapper():\n coded_dict = dict()\n cter = 1\n email_encoded = []\n \n for val in df['email']:\n if val not in coded_dict:\n coded_dict[val] = cter\n cter+=1\n \n email_encoded.append(coded_dict[val])\n return email_encoded\n\nemail_encoded = email_mapper()\ndel df['email']\ndf['user_id'] = email_encoded\n\n# show header\ndf.head()", "_____no_output_____" ], [ "## If you stored all your results in the variable names above, \n## you shouldn't need to change anything in this cell\n\nsol_1_dict = {\n '`50% of individuals have _____ or fewer interactions.`': median_val,\n '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,\n '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,\n '`The most viewed article in the dataset was viewed _____ times.`': max_views,\n '`The article_id of the most viewed article is ______.`': most_viewed_article_id,\n '`The number of unique articles that have at least 1 rating ______.`': unique_articles,\n '`The number of unique users in the dataset is ______`': unique_users,\n '`The number of unique articles on the IBM platform`': total_articles\n}\n\n# Test your dictionary against the solution\nt.sol_1_test(sol_1_dict)", "It looks like you have everything right here! Nice job!\n" ] ], [ [ "### <a class=\"anchor\" id=\"Rank\">Part II: Rank-Based Recommendations</a>\n\nUnlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.\n\n`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. 
Test your function using the tests below.", "_____no_output_____" ] ], [ [ "def get_top_articles(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n top_articles = list(df.groupby('title').count().sort_values(by='user_id',ascending = False).head(n).index)\n \n return top_articles # Return the top article titles from df (not df_content)\n\ndef get_top_article_ids(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n top_articles = list(df.groupby('article_id').count().sort_values(by='user_id',ascending = False).head(n).index.astype(str))\n \n return top_articles # Return the top article ids", "_____no_output_____" ], [ "print(get_top_articles(10))\nprint(get_top_article_ids(10))", "['use deep learning for image classification', 'insights from new york car accident reports', 'visualize car data with brunel', 'use xgboost, scikit-learn & ibm watson machine learning apis', 'predicting churn with the spss random tree algorithm', 'healthcare python streaming application demo', 'finding optimal locations of new store using decision optimization', 'apache spark lab, part 1: basic concepts', 'analyze energy consumption in buildings', 'gosales transactions for logistic regression model']\n['1429.0', '1330.0', '1431.0', '1427.0', '1364.0', '1314.0', '1293.0', '1170.0', '1162.0', '1304.0']\n" ], [ "# Test your function by returning the top 5, 10, and 20 articles\ntop_5 = get_top_articles(5)\ntop_10 = get_top_articles(10)\ntop_20 = get_top_articles(20)\n\n# Test each of your three lists from above\nt.sol_2_test(get_top_articles)", "Your top_5 looks like the solution list! Nice job.\nYour top_10 looks like the solution list! Nice job.\nYour top_20 looks like the solution list! Nice job.\n" ] ], [ [ "### <a class=\"anchor\" id=\"User-User\">Part III: User-User Based Collaborative Filtering</a>\n\n\n`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. \n\n* Each **user** should only appear in each **row** once.\n\n\n* Each **article** should only show up in one **column**. \n\n\n* **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. \n\n\n* **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. 
\n\nUse the tests to make sure the basic structure of your matrix matches what is expected by the solution.", "_____no_output_____" ] ], [ [ "# create the user-article matrix with 1's and 0's\n\ndef create_user_item_matrix(df):\n '''\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '''\n user_item = df.groupby(['user_id', 'article_id']).count().groupby(['user_id', 'article_id']).count().unstack()\n user_item = user_item.fillna(0)\n \n return user_item # return the user_item matrix \n\nuser_item = create_user_item_matrix(df)", "_____no_output_____" ], [ "## Tests: You should just need to run this cell. Don't change the code.\nassert user_item.shape[0] == 5149, \"Oops! The number of users in the user-article matrix doesn't look right.\"\nassert user_item.shape[1] == 714, \"Oops! The number of articles in the user-article matrix doesn't look right.\"\nassert user_item.sum(axis=1)[1] == 36, \"Oops! The number of articles seen by user 1 doesn't look right.\"\nprint(\"You have passed our quick tests! Please proceed!\")", "You have passed our quick tests! Please proceed!\n" ] ], [ [ "`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. \n\nUse the tests to test your function.", "_____no_output_____" ] ], [ [ "def find_similar_users(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user_id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '''\n # compute similarity of each user to the provided user\n similarities_matrix = user_item.dot(np.transpose(user_item))\n similarities_user = similarities_matrix[similarities_matrix.index == user_id].transpose()\n similarities_user.columns = ['similarities']\n \n # sort by similarity\n similarities_sorted = similarities_user.sort_values(by = 'similarities', ascending=False)\n\n # create list of just the ids\n most_similar_users = list(similarities_sorted.index)\n \n # remove the own user's id\n most_similar_users.remove(user_id)\n \n return most_similar_users # return a list of the users in order from most to least similar\n ", "_____no_output_____" ], [ "# Do a spot check of your function\nprint(\"The 10 most similar users to user 1 are: {}\".format(find_similar_users(1)[:10]))\nprint(\"The 5 most similar users to user 3933 are: {}\".format(find_similar_users(3933)[:5]))\nprint(\"The 3 most similar users to user 46 are: {}\".format(find_similar_users(46)[:3]))", "The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 3870, 131, 4201, 46, 5041]\nThe 5 most similar users to user 3933 are: [1, 23, 3782, 203, 4459]\nThe 3 most similar users to user 46 are: [4201, 3782, 23]\n" ] ], [ [ "`3.` Now that you have a function that provides the 
most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user. ", "_____no_output_____" ] ], [ [ "def get_article_names(article_ids, df=df):\n '''\n INPUT:\n article_ids - (list) a list of article ids\n df - (pandas dataframe) df as defined at the top of the notebook\n \n OUTPUT:\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the title column)\n '''\n article_names = df[df['article_id'].isin(article_ids)]['title'].unique().tolist()\n \n return article_names # Return the article names associated with list of article ids\n\n\ndef get_user_articles(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '''\n user_transpose = user_item[user_item.index == user_id].transpose()\n user_transpose.columns = ['seen']\n \n article_ids = list(user_transpose[user_transpose['seen'] == 1].reset_index()['article_id'].astype(str))\n article_names = get_article_names(article_ids,df)\n \n return article_ids, article_names # return the ids and names\n\n\ndef user_user_recs(user_id, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n Users who are the same closeness are chosen arbitrarily as the 'next' user\n \n For the user where the number of recommended articles starts below m \n and ends exceeding m, the last items are chosen arbitrarily\n \n '''\n articles_seen = get_user_articles(user_id)\n closest_users = find_similar_users(user_id)\n \n # Keep the recommended articles here\n recs = np.array([])\n \n # Go through the users and identify articles they have seen the user hasn't seen\n for user in closest_users:\n users_articles_seen_id, users_articles_seen_name = get_user_articles(user)\n \n #Obtain recommendations for each neighbor\n new_recs = np.setdiff1d(users_articles_seen_id, articles_seen, assume_unique=True)\n \n # Update recs with new recs\n recs = np.unique(np.concatenate([new_recs, recs], axis=0))\n \n # If we have enough recommendations exit the loop\n if len(recs) > m-1:\n break\n \n # Pick the first m\n recs = recs[0:m]\n \n return recs # return your recommendations for this user_id ", "_____no_output_____" ], [ "# Test your functions here - No need to change this code - just run this cell\nassert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, 
scikit-learn & ibm watson machine learning apis']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])\nassert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])\nassert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])\nassert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])\nprint(\"If this is all you see, you passed all of our tests! Nice job!\")", "If this is all you see, you passed all of our tests! Nice job!\n" ] ], [ [ "`4.` Now we are going to improve the consistency of the **user_user_recs** function from above. \n\n* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.\n\n\n* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose articles with the articles with the most total interactions before choosing those with fewer total interactions. 
This ranking should be what would be obtained from the **top_articles** function you wrote earlier.", "_____no_output_____" ] ], [ [ "def get_top_sorted_users(user_id, df=df, user_item=user_item):\n '''\n INPUT:\n user_id - (int)\n df - (pandas dataframe) df as defined at the top of the notebook \n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '''\n # Get similarities\n similarities_matrix = user_item.dot(np.transpose(user_item))\n similarities_user = similarities_matrix[similarities_matrix.index == user_id].transpose()\n similarities_user.columns = ['similarities']\n \n # Get interactions\n interactions = df.groupby('user_id').count().drop(['title'],axis=1)\n interactions.columns = ['interactions']\n \n # Merge similarities with interactions\n neighbors_df_not_sorted = similarities_user.join(interactions, how='left')\n neighbors_df = neighbors_df_not_sorted.sort_values(by = ['similarities', 'interactions'], ascending = False)\n \n return neighbors_df # Return the dataframe specified in the doc_string\n\n\ndef user_user_recs_part2(user_id, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n * Choose the users that have the most total article interactions \n before choosing those with fewer article interactions.\n\n * Choose articles with the articles with the most total interactions \n before choosing those with fewer total interactions. 
\n \n '''\n articles_seen = get_user_articles(user_id)\n closest_users = get_top_sorted_users(user_id).index.tolist()\n closest_users.remove(user_id)\n top_articles_all = get_top_article_ids(len(df))\n \n # Keep the recommended articles here\n recs = np.array([])\n \n # Go through the users and identify articles they have seen the user hasn't seen\n for user in closest_users:\n users_articles_seen_id, users_articles_seen_name = get_user_articles(user)\n # Sort articles according to the number of interactions\n users_articles_seen_id = sorted(users_articles_seen_id, key=lambda x: top_articles_all.index(x))\n \n # Obtain recommendations for each neighbor\n new_recs = np.setdiff1d(users_articles_seen_id, articles_seen, assume_unique=True)\n \n # Update recs with new recs\n recs = np.unique(np.concatenate([new_recs, recs], axis=0))\n \n # If we have enough recommendations exit the loop\n if len(recs) > m-1:\n break\n \n # Pick the first m\n recs = recs[0:m].tolist()\n \n # Get rec names\n rec_names = get_article_names(recs)\n \n return recs, rec_names", "_____no_output_____" ], [ "# Quick spot check - don't change this code - just use it to test your functions\nrec_ids, rec_names = user_user_recs_part2(20, 10)\nprint(\"The top 10 recommendations for user 20 are the following article ids:\")\nprint(rec_ids)\nprint()\nprint(\"The top 10 recommendations for user 20 are the following article names:\")\nprint(rec_names)", "The top 10 recommendations for user 20 are the following article ids:\n['1024.0', '1085.0', '109.0', '1150.0', '1151.0', '1152.0', '1153.0', '1154.0', '1157.0', '1160.0']\n\nThe top 10 recommendations for user 20 are the following article names:\n['airbnb data for analytics: washington d.c. listings', 'analyze accident reports on amazon emr spark', 'tensorflow quick tips', 'airbnb data for analytics: venice listings', 'airbnb data for analytics: venice calendar', 'airbnb data for analytics: venice reviews', 'using deep learning to reconstruct high-resolution audio', 'airbnb data for analytics: vienna listings', 'airbnb data for analytics: vienna calendar', 'airbnb data for analytics: chicago listings']\n" ] ], [ [ "`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.", "_____no_output_____" ] ], [ [ "### Tests with a dictionary of results\nuser1_most_sim = get_top_sorted_users(1).iloc[1].name #Find the user that is most similar to user 1 \nuser131_10th_sim = get_top_sorted_users(131).iloc[10].name #Find the 10th most similar user to user 131", "_____no_output_____" ], [ "## Dictionary Test Here\nsol_5_dict = {\n 'The user that is most similar to user 1.': user1_most_sim, \n 'The user that is the 10th most similar to user 131': user131_10th_sim,\n}\n\nt.sol_5_test(sol_5_dict)", "This all looks good! Nice job!\n" ] ], [ [ "`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.", "_____no_output_____" ], [ "We would provide the top articles for all the users.", "_____no_output_____" ], [ "`7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. 
You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.", "_____no_output_____" ] ], [ [ "new_user = '0.0'\n\n# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.\n# Provide a list of the top 10 article ids you would give to \nnew_user_recs = get_top_article_ids(10)", "_____no_output_____" ], [ "assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), \"Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users.\"\n\nprint(\"That's right! Nice job!\")", "That's right! Nice job!\n" ] ], [ [ "### <a class=\"anchor\" id=\"Content-Recs\">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a>\n\nAnother method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. \n\n`1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations.\n\n### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.", "_____no_output_____" ] ], [ [ "def make_content_recs():\n '''\n INPUT:\n \n OUTPUT:\n \n '''", "_____no_output_____" ] ], [ [ "`2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender?\n\n### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.", "_____no_output_____" ], [ "**Write an explanation of your content based recommendation system here.**", "_____no_output_____" ], [ "`3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.\n\n### This part is NOT REQUIRED to pass this project. 
However, you may choose to take this on as an extra way to show off your skills.", "_____no_output_____" ] ], [ [ "# make recommendations for a brand new user\n\n\n# make a recommendations for a user who only has interacted with article id '1427.0'\n\n", "_____no_output_____" ] ], [ [ "### <a class=\"anchor\" id=\"Matrix-Fact\">Part V: Matrix Factorization</a>\n\nIn this part of the notebook, you will build use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.\n\n`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook. ", "_____no_output_____" ] ], [ [ "# Load the matrix here\nuser_item_matrix = pd.read_pickle('user_item_matrix.p')", "_____no_output_____" ], [ "# quick look at the matrix\nuser_item_matrix.head()", "_____no_output_____" ] ], [ [ "`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.", "_____no_output_____" ] ], [ [ "# Perform SVD on the User-Item Matrix Here\nu, s, vt = np.linalg.svd(user_item_matrix)", "_____no_output_____" ], [ "s.shape, u.shape, vt.shape", "_____no_output_____" ], [ "# Change the dimensions of u, s, and vt as necessary\n# update the shape of u and store in u_new\nu_new = u[:, :len(s)]\n\n# update the shape of s and store in s_new\ns_new = np.zeros((len(s), len(s)))\ns_new[:len(s), :len(s)] = np.diag(s) \n\n# Because we are using 4 latent features and there are only 4 movies, \n# vt and vt_new are the same\nvt_new = vt", "_____no_output_____" ], [ "s_new.shape, u_new.shape, vt_new.shape", "_____no_output_____" ] ], [ [ "There are no null values in the matrix since we are not using ratings, but whether the user has seen an article or not. Therefore it is enough for us to use SVD, we do not need to use funkSVD which needs to be used when handling null values.", "_____no_output_____" ], [ "`3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.", "_____no_output_____" ] ], [ [ "num_latent_feats = np.arange(10,700+10,20)\nsum_errs = []\n\nfor k in num_latent_feats:\n # restructure with k latent features\n s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]\n \n # take dot product\n user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))\n \n # compute error for each prediction to actual value\n diffs = np.subtract(user_item_matrix, user_item_est)\n \n # total errors and keep track of them\n err = np.sum(np.sum(np.abs(diffs)))\n sum_errs.append(err)\n \n \nplt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);\nplt.xlabel('Number of Latent Features');\nplt.ylabel('Accuracy');\nplt.title('Accuracy vs. 
Number of Latent Features');", "_____no_output_____" ] ], [ [ "`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below. \n\nUse the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below: \n\n* How many users can we make predictions for in the test set? \n* How many users are we not able to make predictions for because of the cold start problem?\n* How many articles can we make predictions for in the test set? \n* How many articles are we not able to make predictions for because of the cold start problem?", "_____no_output_____" ] ], [ [ "df_train = df.head(40000)\ndf_test = df.tail(5993)\n\ndef create_test_and_train_user_item(df_train, df_test):\n '''\n INPUT:\n df_train - training dataframe\n df_test - test dataframe\n \n OUTPUT:\n user_item_train - a user-item matrix of the training dataframe \n (unique users for each row and unique articles for each column)\n user_item_test - a user-item matrix of the testing dataframe \n (unique users for each row and unique articles for each column)\n test_idx - all of the test user ids\n test_arts - all of the test article ids\n \n '''\n \n # Get user_item_matrices\n user_item_train = create_user_item_matrix(df_train) \n user_item_test = create_user_item_matrix(df_test) \n \n # Get user ids\n test_idx = user_item_test.index.tolist()\n \n # Get article ids\n test_arts = user_item_test.columns.droplevel().tolist()\n \n return user_item_train, user_item_test, test_idx, test_arts\n\nuser_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)", "_____no_output_____" ], [ "print('1. How many users can we make predictions for in the test set?')\nqst_1 = len(np.intersect1d(test_idx, user_item_train.index.tolist(), assume_unique=True))\nprint(qst_1)\nprint('')\n\nprint('2. How many users in the test set are we not able to make predictions for because of the cold start problem?')\nprint(len(test_idx) - qst_1)\nprint('')\n\nprint('3. How many movies can we make predictions for in the test set?')\nqst_3 = len(np.intersect1d(test_arts, user_item_train.columns.droplevel().tolist(), assume_unique=True))\nprint(qst_3)\nprint('')\n\nprint('4. How many movies in the test set are we not able to make predictions for because of the cold start problem')\nprint(len(test_arts) - qst_3)\nprint('')", "1. How many users can we make predictions for in the test set?\n20\n\n2. How many users in the test set are we not able to make predictions for because of the cold start problem?\n662\n\n3. How many movies can we make predictions for in the test set?\n574\n\n4. 
How many movies in the test set are we not able to make predictions for because of the cold start problem\n0\n\n" ], [ "# Replace the values in the dictionary below\na = 662 \nb = 574 \nc = 20 \nd = 0 \n\n\nsol_4_dict = {\n 'How many users can we make predictions for in the test set?': c, \n 'How many users in the test set are we not able to make predictions for because of the cold start problem?': a, \n 'How many movies can we make predictions for in the test set?': b,\n 'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d\n}\n\nt.sol_4_test(sol_4_dict)", "Awesome job! That's right! All of the test movies are in the training data, but there are only 20 test users that were also in the training set. All of the other users that are in the test set we have no data on. Therefore, we cannot make predictions for these users using SVD.\n" ] ], [ [ "Please note that I had to modify 'articles' to 'movies' otherwise the function would not get the right result. However, we are talking about articles here, not movies.", "_____no_output_____" ], [ "`5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.\n\nUse the cells below to explore how well SVD works towards making predictions for recommendations on the test data. ", "_____no_output_____" ] ], [ [ "# fit SVD on the user_item_train matrix\nu_train, s_train, vt_train = np.linalg.svd(user_item_train)# fit svd similar to above then use the cells below", "_____no_output_____" ], [ "s_train.shape, u_train.shape, vt_train.shape", "_____no_output_____" ], [ "# Find users to predict in test matrix\nusers_to_predict = np.intersect1d(test_idx, user_item_train.index.tolist(), assume_unique=True).tolist()\n\n# Get filtered test matrix\nuser_item_test_f = user_item_test[user_item_test.index.isin(users_to_predict)]\n\n# Get position of the users to predict in the train matrix\nusers_train_pos = user_item_train.reset_index()[user_item_train.reset_index()['user_id'].isin(users_to_predict)].index.tolist()", "_____no_output_____" ], [ "# Find articles to predict in test matrix\narticles_to_predict = np.intersect1d(test_arts, user_item_test.columns.droplevel().tolist(), assume_unique=True).tolist()\n\n# Get position of the articles to predict in the train matrix\narticles_train_pos = user_item_train.columns.droplevel().isin(articles_to_predict)", "_____no_output_____" ], [ "# Get u and vt matrices for train\nu_test = u_train[users_train_pos,:] \nvt_test = vt_train[:,articles_train_pos]", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score", "_____no_output_____" ], [ "# Use these cells to see how well you can use the training \n# decomposition to predict on test data\n\nnum_latent_feats = np.arange(10,700+10,20)\nsum_errs_train = []\nsum_errs_test = []\n\ntrain_errs = []\ntest_errs = []\n\nfor k in num_latent_feats:\n # restructure with k latent features\n s_train_new, u_train_new, vt_train_new = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]\n u_test_new, vt_test_new = u_test[:, :k], vt_test[:k, :]\n \n # take dot product\n user_item_est_train = np.around(np.dot(np.dot(u_train_new, s_train_new), vt_train_new))\n user_item_est_test = 
np.around(np.dot(np.dot(u_test_new, s_train_new), vt_test_new))\n \n # compute error for each prediction to actual value\n diffs_train = np.subtract(user_item_train, user_item_est_train)\n diffs_test = np.subtract(user_item_test_f, user_item_est_test)\n \n # total errors and keep track of them\n err_train = np.sum(np.sum(np.abs(diffs_train)))\n sum_errs_train.append(err_train)\n \n err_test = np.sum(np.sum(np.abs(diffs_test)))\n sum_errs_test.append(err_test) \n \n# number of interactions\nnb_interactions_train = user_item_est_train.shape[0] * user_item_est_train.shape[1]\nnb_interactions_test = user_item_est_test.shape[0] * user_item_est_test.shape[1]\n \nplt.plot(num_latent_feats, 1 - np.array(sum_errs_train)/nb_interactions_train, label = 'Train');\nplt.plot(num_latent_feats, 1 - np.array(sum_errs_test)/nb_interactions_test, label = 'Test');\nplt.xlabel('Number of Latent Features');\nplt.ylabel('Accuracy');\nplt.title('Accuracy vs. Number of Latent Features');\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? ", "_____no_output_____" ], [ "When using SVD on the test dataset, the accuracy decreases with the number of latent factors. This is due to the fact that only a small amount of users (20) are common between the train and test dataset. This makes that our data (0s and 1s) is imbalanced, that there is a big disproportionate ratio of 0s in the dataset compared to the 1s.\n\nMoreover, when increasing the number of latent factors, we are increasing the overfitting on the training data (accuracy increases), which also explains why the accuracy on the testing dataset decreases.\n\nIn order to understand if our results are working in practice, I would conduct an experiment. I would split my users into three groups with different treatments:\n- Group 1: do not receive any recommendation\n- Group 2: receives recommendations from a mix of collaborative filtering and top ranked\n- Group 3: receives recommendations from matrix factorization\n\nWe would split the users on a cookie-based, so that they see the same experience everytime they check the website. \n\nI would do the following tests:\n- Group 1 vs. Group 2, where the null hypothesis is that there is no difference between not providing recommendations to users and providing collaborative + top ranked-based recommendations\n- Group 1 vs. Group 3, where the null hypothesis that there is no difference between not providing recommendations to suers and providing matrix factorization-based recommendations\n\nThe success metric will be the number of clicks on articles per user per session on the website. This would mainly focus on the novelty effect of the recommendations, as we would assume users would click on articles if they have not seen them before. It could also be beneficial if the users could rate the article (even with just thumbs up or down), in order to know if the article was interesting for them, and therefore if the recommendations are relevant.\n\nThis success metric would need to be statistically significant to go ahead with implementing the recommendations engine, and which method. 
If it is not, we'd need to understand if other factors would justify the implementation of the recommendations engine.\n\nWe would also check the invariant metric within our groups to be sure that they are equivalent (e.g. one group contains a high share of users who have already seen more than 200 articles (for example) while another group contains a high share of users who have only been through 10 articles).", "_____no_output_____" ] ], [ [ "from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
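The closing markdown of the Recommendations with IBM notebook above proposes judging the recommender through an experiment whose success metric is article clicks per user per session, checked for statistical significance. The notebook contains no code for that step, so the following is only a minimal sketch of how such a comparison might be run with `scipy`; the click-count arrays are hypothetical placeholders rather than data from the project:

```python
import numpy as np
from scipy import stats

# Hypothetical per-user click counts logged during the experiment:
# one group saw no recommendations, the other saw recommended articles.
clicks_no_recs = np.array([3, 5, 2, 4, 6, 3, 2, 5, 4, 3])
clicks_with_recs = np.array([5, 7, 4, 6, 8, 5, 4, 6, 7, 5])

# Welch's two-sample t-test (does not assume equal group variances)
t_stat, p_value = stats.ttest_ind(clicks_with_recs, clicks_no_recs, equal_var=False)
print(f"t = {t_stat:.3f}, p = {p_value:.4f}")

# Only reject the "no difference" null hypothesis if p falls below the chosen alpha
alpha = 0.05
print("significant" if p_value < alpha else "not significant")
```

Welch's test is used here because the two groups need not have equal variance; in practice the real logged click counts per user would replace the placeholder arrays.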
d05a9e5a5bc56600027b81f74ff96f98f11aa314
10,120
ipynb
Jupyter Notebook
01-Lesson-Plans/04-Pandas/1/Activities/09-Ins_ReadingWritingCSV/Solved/pandas_reading_files.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/04-Pandas/1/Activities/09-Ins_ReadingWritingCSV/Solved/pandas_reading_files.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/04-Pandas/1/Activities/09-Ins_ReadingWritingCSV/Solved/pandas_reading_files.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
27.955801
79
0.411561
[ [ [ "# Dependencies\nimport pandas as pd", "_____no_output_____" ], [ "# Store filepath in a variable\nfile_one = \"Resources/DataOne.csv\"", "_____no_output_____" ], [ "# Read our Data file with the pandas library\n# Not every CSV requires an encoding, but be aware this can come up\nfile_one_df = pd.read_csv(file_one, encoding=\"ISO-8859-1\")", "_____no_output_____" ], [ "# Show just the header\nfile_one_df.head()", "_____no_output_____" ], [ "# Show a single column\nfile_one_df[\"full_name\"].head()", "_____no_output_____" ], [ "# Show multiple specific columns--note the extra brackets\nfile_one_df[[\"full_name\", \"email\"]].head()", "_____no_output_____" ], [ "# Head does not change the DataFrame--it only displays it\nfile_one_df.head()", "_____no_output_____" ], [ "# Export file as a CSV, without the Pandas index, but with the header\nfile_one_df.to_csv(\"Output/fileOne.csv\", index=False, header=True)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
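The reading and writing notebook above ends by exporting `Output/fileOne.csv` with `index=False` and `header=True`. As an optional sanity check (not part of the original notebook, and assuming the export cell has already been run so the file exists), the output can be read back to confirm that the header survived and that no extra index column was written:

```python
import pandas as pd

# Read the exported file back in; the header row becomes the column names by default
round_trip_df = pd.read_csv("Output/fileOne.csv")

# Because the export used index=False, there should be no "Unnamed: 0" index column
print(round_trip_df.columns.tolist())
print(round_trip_df.head())
```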
d05a9ebc86d6104d85f24d8fc39af835a105ef6c
307,527
ipynb
Jupyter Notebook
notebooks/c0.2-sg-feature-engineering.ipynb
sindolfoGomes/rossmann-stores-sales
a431c4d41ee6e60a7e19b3a4b9cda89cadc57f4b
[ "FSFAP" ]
null
null
null
notebooks/c0.2-sg-feature-engineering.ipynb
sindolfoGomes/rossmann-stores-sales
a431c4d41ee6e60a7e19b3a4b9cda89cadc57f4b
[ "FSFAP" ]
null
null
null
notebooks/c0.2-sg-feature-engineering.ipynb
sindolfoGomes/rossmann-stores-sales
a431c4d41ee6e60a7e19b3a4b9cda89cadc57f4b
[ "FSFAP" ]
null
null
null
213.264216
205,408
0.881575
[ [ [ "# 0.0. IMPORTS", "_____no_output_____" ] ], [ [ "import inflection\nimport math\nimport datetime \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfrom IPython.core.display import HTML\nfrom IPython.display import Image\n", "_____no_output_____" ] ], [ [ "## 0.1. Helper Functions", "_____no_output_____" ] ], [ [ "def load_csv(path):\n df = pd.read_csv(path, low_memory=False)\n return df\n\ndef rename_columns(df, old_columns):\n snakecase = lambda x: inflection.underscore(x)\n\n cols_new = list(map(snakecase, old_columns))\n\n print(f\"Old columns: {df.columns.to_list()}\")\n \n # Rename\n df.columns = cols_new\n \n print(f\"\\nNew columns: {df.columns.to_list()}\")\n\n print('\\n', df.columns)\n return df\n\ndef show_dimensions(df):\n print(f\"Number of Rows: {df1.shape[0]}\")\n print(f\"Number of Columns: {df1.shape[1]}\")\n print(f\"Shape: {df1.shape}\")\n \n return None\n\ndef show_data_types(df):\n print(df.dtypes)\n \n return None\n\ndef check_na(df):\n print(df.isna().sum())\n \n return None\n\ndef show_descriptive_statistical(df):\n # Central Tendency - mean, median\n ct1 = pd.DataFrame(df.apply(np.mean)).T\n ct2 = pd.DataFrame(df.apply(np.median)).T\n\n # Dispersion - std, min, max, range, skew, kurtosis\n d1 = pd.DataFrame(df.apply(np.std)).T\n d2 = pd.DataFrame(df.apply(min)).T\n d3 = pd.DataFrame(df.apply(max)).T\n d4 = pd.DataFrame(df.apply(lambda x: x.max() - x.min())).T\n d5 = pd.DataFrame(df.apply(lambda x: x.skew())).T\n d6 = pd.DataFrame(df.apply(lambda x: x.kurtosis())).T\n\n m = pd.concat([d2, d3, d4, ct1, ct2, d1, d5, d6]).T.reset_index()\n m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']\n print(m)\n \ndef jupyter_settings():\n %matplotlib inline\n %pylab inline\n \n plt.style.use( 'ggplot')\n plt.rcParams['figure.figsize'] = [24, 9]\n plt.rcParams['font.size'] = 24\n \n display( HTML( '<style>.container { width:100% !important; }</style>') )\n pd.options.display.max_columns = None\n pd.options.display.max_rows = None\n pd.set_option( 'display.expand_frame_repr', False )\n \n sns.set()", "_____no_output_____" ], [ "jupyter_settings()", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "## 0.2. Path Definition", "_____no_output_____" ] ], [ [ "# path\nhome_path = 'C:\\\\Users\\\\sindolfo\\\\rossmann-stores-sales\\\\'\nraw_data_path = 'data\\\\raw\\\\'\ninterim_data_path = 'data\\\\interim\\\\'\nfigures = 'reports\\\\figures\\\\'", "_____no_output_____" ] ], [ [ "## 0.3. Loading Data", "_____no_output_____" ] ], [ [ "## Historical data including Sales\ndf_sales_raw = load_csv(home_path+raw_data_path+'train.csv')\n\n## Supplemental information about the stores\ndf_store_raw = load_csv(home_path+raw_data_path+'store.csv')\n\n# Merge\ndf_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store')", "_____no_output_____" ] ], [ [ "# 1.0. DATA DESCRIPTION", "_____no_output_____" ] ], [ [ "df1 = df_raw.copy()\ndf1.to_csv(home_path+interim_data_path+'df1.csv')", "_____no_output_____" ] ], [ [ "### Data fields\n", "_____no_output_____" ], [ "\nMost of the fields are self-explanatory. 
The following are descriptions for those that aren't.\n\n- **Id** - an Id that represents a (Store, Date) duple within the test set\n- **Store** - a unique Id for each store\n- **Sales** - the turnover for any given day (this is what you are predicting)\n- **Customers** - the number of customers on a given day\n- **Open** - an indicator for whether the store was open: 0 = closed, 1 = open\n- **StateHoliday** - indicates a state holiday. Normally\nall stores, with few exceptions, are closed on state holidays. Note that all schools are closed on public holidays and weekends. a = public\nholiday, b = Easter holiday, c = Christmas, 0 = None\n- **SchoolHoliday** - indicates if the (Store, Date) was affected by the closure of public schools\n- **StoreType** - differentiates between 4 different store models: a, b, c, d\n- **Assortment** - describes an assortment level: a = basic, b = extra, c = extended\n- **CompetitionDistance** - distance in meters to the nearest competitor store\n- **CompetitionOpenSince[Month/Year]** - gives the approximate year and month of the time the nearest competitor was opened\n- **Promo** - indicates whether a store is running a promo on that day\n- **Promo2** - Promo2 is a continuing and consecutive promotion for some stores: 0 = store is not participating, 1 = store is participating\n- **Promo2Since[Year/Week]** - describes the year and calendar week when the store started participating in Promo2\n- **PromoInterval** - describes the consecutive intervals Promo2 is started, naming the months the promotion is started anew.\nE.g. \"Feb,May,Aug,Nov\" means each round starts in February, May, August, November of any given year for that store", "_____no_output_____" ], [ "## 1.1. Rename Columns", "_____no_output_____" ] ], [ [ "cols_old = [\n 'Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo',\n 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'PromoInterval'\n]\n\ndf1 = rename_columns(df1, cols_old)", "Old columns: ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']\n\nNew columns: ['store', 'day_of_week', 'date', 'sales', 'customers', 'open', 'promo', 'state_holiday', 'school_holiday', 'store_type', 'assortment', 'competition_distance', 'competition_open_since_month', 'competition_open_since_year', 'promo2', 'promo2_since_week', 'promo2_since_year', 'promo_interval']\n\n Index(['store', 'day_of_week', 'date', 'sales', 'customers', 'open', 'promo',\n 'state_holiday', 'school_holiday', 'store_type', 'assortment',\n 'competition_distance', 'competition_open_since_month',\n 'competition_open_since_year', 'promo2', 'promo2_since_week',\n 'promo2_since_year', 'promo_interval'],\n dtype='object')\n" ] ], [ [ "## 1.2. Data Dimensions", "_____no_output_____" ] ], [ [ "show_dimensions(df1)", "Number of Rows: 1017209\nNumber of Columns: 18\nShape: (1017209, 18)\n" ] ], [ [ "## 1.3. Data Types", "_____no_output_____" ] ], [ [ "show_data_types(df1)\n\n## Date is a object type. This is wrong. In the section \"Types Changes\" others chages is made. 
\ndf1['date'] = pd.to_datetime(df1['date'])", "store int64\nday_of_week int64\ndate object\nsales int64\ncustomers int64\nopen int64\npromo int64\nstate_holiday object\nschool_holiday int64\nstore_type object\nassortment object\ncompetition_distance float64\ncompetition_open_since_month float64\ncompetition_open_since_year float64\npromo2 int64\npromo2_since_week float64\npromo2_since_year float64\npromo_interval object\ndtype: object\n" ] ], [ [ "## 1.4. Check NA", "_____no_output_____" ] ], [ [ "check_na(df1)\n\n## Columns with NA vales\n## competition_distance 2642\n## competition_open_since_month 323348\n## competition_open_since_year 323348\n## promo2_since_week 508031\n## promo2_since_year 508031\n## promo_interval 508031", "store 0\nday_of_week 0\ndate 0\nsales 0\ncustomers 0\nopen 0\npromo 0\nstate_holiday 0\nschool_holiday 0\nstore_type 0\nassortment 0\ncompetition_distance 2642\ncompetition_open_since_month 323348\ncompetition_open_since_year 323348\npromo2 0\npromo2_since_week 508031\npromo2_since_year 508031\npromo_interval 508031\ndtype: int64\n" ] ], [ [ "## 1.5. Fillout NA", "_____no_output_____" ] ], [ [ "# competition_distance: distance in meters to the nearest competitor store\n# \n# Assumption: if there is a row that is NA in this column, \n# it is because there is no close competitor. \n# The way I used to represent this is to put \n# a number much larger than the maximum value \n# of the competition_distance variable.\n# \n# The number is 250000.\ndf1['competition_distance'] = df1['competition_distance'].apply(lambda x : 250000 if math.isnan(x) else x)\n\n\n# competition_open_since_month: \n# gives the approximate year and month of the \n# time the nearest competitor was opened\n#\n# Assumption: I'm going to keep this variable because \n# it's important to have something that expresses \n# the feeling of \"since it happened\" or \"until when\".\n# \n# If it's NA I'll copy the month of sale of that line.\ndf1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis=1)\n\n#competition_open_since_year \n# The same assumption from competition_open_since_month\ndf1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis=1)\n\n\n# promo2_since_week: \n# describes the year and calendar week when the store started participating in Promo2\n# \n# The same assumption from competition_open_since_month\ndf1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)\n\n \n# promo2_since_year:\n# describes the year and calendar week when the store started participating in Promo2\ndf1['promo2_since_year'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)\n\n\n# promo_interval\nmonth_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}\n\ndf1['promo_interval'].fillna(0, inplace=True)\n\ndf1['month_map'] = df1['date'].dt.month.map(month_map)\n\ndf1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)", "_____no_output_____" ] ], [ [ "## 1.6. 
Type Changes", "_____no_output_____" ] ], [ [ "df1['competition_open_since_month'] = df1['competition_open_since_month'].astype('int64')\ndf1['competition_open_since_year'] = df1['competition_open_since_year'].astype('int64')\ndf1['promo2_since_week'] = df1['promo2_since_week'].astype('int64')\ndf1['promo2_since_year'] = df1['promo2_since_year'].astype('int64')", "_____no_output_____" ] ], [ [ "## 1.7. Descriptive Statistical", "_____no_output_____" ] ], [ [ "num_attributes = df1.select_dtypes(include=['int64', 'float64'])\ncat_attributes = df1.select_dtypes(exclude=['int64', 'float64', 'datetime64[ns]'])", "_____no_output_____" ] ], [ [ "### 1.7.1 Numerical Attributes", "_____no_output_____" ] ], [ [ "show_descriptive_statistical(num_attributes)", " attributes min max range mean median std skew kurtosis\n0 store 1.0 1115.0 1114.0 558.429727 558.0 321.908493 -0.000955 -1.200524\n1 day_of_week 1.0 7.0 6.0 3.998341 4.0 1.997390 0.001593 -1.246873\n2 sales 0.0 41551.0 41551.0 5773.818972 5744.0 3849.924283 0.641460 1.778375\n3 customers 0.0 7388.0 7388.0 633.145946 609.0 464.411506 1.598650 7.091773\n4 open 0.0 1.0 1.0 0.830107 1.0 0.375539 -1.758045 1.090723\n5 promo 0.0 1.0 1.0 0.381515 0.0 0.485758 0.487838 -1.762018\n6 school_holiday 0.0 1.0 1.0 0.178647 0.0 0.383056 1.677842 0.815154\n7 competition_distance 20.0 250000.0 249980.0 6065.307828 2330.0 14639.818286 12.406761 198.365569\n8 competition_open_since_month 1.0 12.0 11.0 6.786849 7.0 3.311085 -0.042076 -1.232607\n9 competition_open_since_year 1900.0 2015.0 115.0 2010.324840 2012.0 5.515591 -7.235657 124.071304\n10 promo2 0.0 1.0 1.0 0.500564 1.0 0.500000 -0.002255 -1.999999\n11 promo2_since_week 1.0 52.0 51.0 23.619033 22.0 14.310057 0.178723 -1.184046\n12 promo2_since_year 1.0 2015.0 2014.0 1018.981976 2009.0 993.944489 -0.002570 -1.999567\n13 is_promo 0.0 1.0 1.0 0.165966 0.0 0.372050 1.795644 1.224338\n" ], [ "sns.displot(df1['sales'])", "_____no_output_____" ] ], [ [ "### 1.7.2 Categorical Attributes", "_____no_output_____" ] ], [ [ "cat_attributes.apply(lambda x: x.unique().shape[0])", "_____no_output_____" ], [ "aux1 = df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)]\n\nplt.subplot(1, 3, 1)\nsns.boxplot(x='state_holiday', y='sales', data=aux1)\n\nplt.subplot(1, 3, 2)\nsns.boxplot(x='store_type', y='sales', data=aux1)\n\nplt.subplot(1, 3, 3)\nsns.boxplot(x='assortment', y='sales', data=aux1)", "_____no_output_____" ] ], [ [ "# 2.0. FEATURE ENGINEERING", "_____no_output_____" ] ], [ [ "df2 = df1.copy()\ndf2.to_csv(home_path+interim_data_path+'df2.csv')", "_____no_output_____" ] ], [ [ "## 2.1. 
Hypothesis Mind Map", "_____no_output_____" ] ], [ [ "Image(home_path+figures+'mind-map-hypothesis.png')", "_____no_output_____" ] ], [ [ "## 2.2 Creating hypotheses", "_____no_output_____" ], [ "### 2.2.1 Store Hypotheses", "_____no_output_____" ], [ "**1.** Stores with larger staff should sell more.\n\n**2.** Stores with more inventory should sell more.\n\n**3.** Stores with close competitors should sell less.\n\n**4.** Stores with a larger assortment should sell more.\n\n**5.** Stores with more employees should sell more.\n\n**6.** Stores with longer-term competitors should sell more.", "_____no_output_____" ], [ "### 2.2.2 Product Hypotheses", "_____no_output_____" ], [ "**1.** Stores with more marketing investment should sell more.\n\n**2.** Stores with more products on display should sell more.\n\n**3.** Stores that have cheaper products should sell more.\n\n**4.** Stores that have more inventory should sell more.\n\n**5.** Stores that do more promotions should sell more.\n\n**6.** Stores with promotions active for longer should sell more.\n\n**7.** Stores with more promotion days should sell more.\n\n**8.** Stores with more consecutive promotions should sell more.\n", "_____no_output_____" ], [ "### 2.2.3 Temporal Hypotheses", "_____no_output_____" ], [ "**1.** Stores that have more holidays should sell less.\n\n**2.** Stores that open within the first six months should sell more.\n\n**3.** Stores that open on weekends should sell more.\n\n**4.** Stores open during the Christmas holiday should sell more.\n\n**5.** Stores should sell more over the years.\n\n**6.** Stores should sell more after the 10th of each month.\n\n**7.** Stores should sell more in the second half of the year.\n\n**8.** Stores should sell less on weekends.\n\n**9.** Stores should sell less during school holidays.", "_____no_output_____" ], [ "## 2.3. Final List of Hypotheses", "_____no_output_____" ], [ "**1.** Stores with close competitors should sell less.\n\n**2.** Stores with a larger assortment should sell more.\n\n**3.** Stores with longer-term competitors should sell more.\n\n**4.** Stores with promotions active for longer should sell more.\n\n**5.** Stores with more promotion days should sell more.\n\n**6.** Stores with more consecutive promotions should sell more.\n\n**7.** Stores open during the Christmas holiday should sell more.\n\n**8.** Stores should sell more over the years.\n\n**9.** Stores should sell more after the 10th of each month.\n\n**10.** Stores should sell more in the second half of the year.\n\n**11.** Stores should sell less on weekends.\n\n**12.** Stores should sell less during school holidays.\n\n\n", "_____no_output_____" ], [ "## 2.4. Feature Engineering", "_____no_output_____" ], [ "# RUN THE FEATURE ENGINEERING AGAIN, SEPARATELY FOR EACH VARIABLE", "_____no_output_____" ] ], [ [ "# year\ndf2['year'] = df2['date'].dt.year\n\n# month\ndf2['month'] = df2['date'].dt.month\n\n# day\ndf2['day'] = df2['date'].dt.day\n\n# week of year\ndf2['week_of_year'] = df2['date'].dt.isocalendar().week\n\n# year week\ndf2['year_week'] = df2['date'].dt.strftime('%Y-%W')\n\n# competition since\n# I have competition measured in months and years. 
\n# Now I'm going to put the two together and create a date.\ndf2['competition_since'] = df2.apply(\n lambda x: datetime.datetime(\n year=x['competition_open_since_year'], \n month=x['competition_open_since_month'], \n day=1), \n axis=1)\n\n## competition_time_month\ndf2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype('int64')\n\n# promo since\ndf2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)\nprint(df2['promo_since'].sample())\n\ndf2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime(x + '-1', '%Y-%W') - datetime.timedelta(days=7))\n\n# promo_time_week\ndf2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7).apply(lambda x: x.days).astype('int64')\n\n# assortment\ndf2['assortment'] = df2['assortment'].apply(lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended')\n\n# state holiday\n", "793271 2013-5\nName: promo_since, dtype: object\n" ], [ "df2.head().T", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
d05a9f077fa31fdda12dcae980d07e277d05c3f5
19,000
ipynb
Jupyter Notebook
ETL_create_database.ipynb
maadpeal/Movies-ETL
eb3624e1e245b7aeb61d095bb60cec34caccf5fc
[ "MIT" ]
null
null
null
ETL_create_database.ipynb
maadpeal/Movies-ETL
eb3624e1e245b7aeb61d095bb60cec34caccf5fc
[ "MIT" ]
null
null
null
ETL_create_database.ipynb
maadpeal/Movies-ETL
eb3624e1e245b7aeb61d095bb60cec34caccf5fc
[ "MIT" ]
null
null
null
53.221289
196
0.598158
[ [ [ "import json\nimport pandas as pd\nimport numpy as np\n\nimport re\n\nfrom sqlalchemy import create_engine\nimport psycopg2\n\nfrom config import db_password\n\nimport time", "_____no_output_____" ], [ "# Add the clean movie function that takes in the argument, \"movie\".\ndef clean_movie(movie):\n movie = dict(movie) #create a non-destructive copy\n alt_titles = {}\n for key in ['Also known as','Arabic','Cantonese','Chinese','French',\n 'Hangul','Hebrew','Hepburn','Japanese','Literally',\n 'Mandarin','McCune–Reischauer','Original title','Polish',\n 'Revised Romanization','Romanized','Russian',\n 'Simplified','Traditional','Yiddish']:\n if key in movie:\n alt_titles[key] = movie[key]\n movie.pop(key)\n if len(alt_titles) > 0:\n movie['alt_titles'] = alt_titles\n # merge column names\n def change_column_name(old_name, new_name):\n if old_name in movie:\n movie[new_name] = movie.pop(old_name)\n change_column_name('Adaptation by', 'Writer(s)')\n change_column_name('Country of origin', 'Country')\n change_column_name('Directed by', 'Director')\n change_column_name('Distributed by', 'Distributor')\n change_column_name('Edited by', 'Editor(s)')\n change_column_name('Length', 'Running time')\n change_column_name('Original release', 'Release date')\n change_column_name('Music by', 'Composer(s)')\n change_column_name('Produced by', 'Producer(s)')\n change_column_name('Producer', 'Producer(s)')\n change_column_name('Productioncompanies ', 'Production company(s)')\n change_column_name('Productioncompany ', 'Production company(s)')\n change_column_name('Released', 'Release Date')\n change_column_name('Release Date', 'Release date')\n change_column_name('Screen story by', 'Writer(s)')\n change_column_name('Screenplay by', 'Writer(s)')\n change_column_name('Story by', 'Writer(s)')\n change_column_name('Theme music composer', 'Composer(s)')\n change_column_name('Written by', 'Writer(s)')\n return movie", "_____no_output_____" ], [ "# 1 Add the function that takes in three arguments;\n# Wikipedia data, Kaggle metadata, and MovieLens rating data (from Kaggle)\n\ndef extract_transform_load(wiki_file, kaggle_file, ratings_file):\n # Read in the kaggle metadata and MovieLens ratings CSV files as Pandas DataFrames.\n kaggle_metadata = pd.read_csv(kaggle_file, low_memory=False)\n ratings = pd.read_csv(ratings_file)\n\n # Open and read the Wikipedia data JSON file.\n with open(wiki_file, mode='r') as file:\n wiki_movies_raw = json.load(file)\n \n # Write a list comprehension to filter out TV shows.\n wiki_movies = [\n movie for movie in wiki_movies_raw\n if ('Director' in movie or 'Directed by' in movie)\n and 'imdb_link' in movie\n ]\n\n # Write a list comprehension to iterate through the cleaned wiki movies list\n # and call the clean_movie function on each movie.\n clean_movies = [clean_movie(movie) for movie in wiki_movies]\n\n # Read in the cleaned movies list from Step 4 as a DataFrame.\n wiki_movies_df = pd.DataFrame(clean_movies)\n\n # Write a try-except block to catch errors while extracting the IMDb ID using a regular expression string and\n # dropping any imdb_id duplicates. 
If there is an error, capture and print the exception.\n try:\n wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\\d{7})')\n wiki_movies_df = wiki_movies_df.drop_duplicates(['imdb_id'],keep='first')\n except:\n print('An error occurred while getting the id or deleting the duplicates')\n \n # Write a list comprehension to keep the columns that don't have null values from the wiki_movies_df DataFrame.\n wiki_columns_to_keep = [column for column in wiki_movies_df.columns if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * 0.9]\n wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]\n\n # Create a variable that will hold the non-null values from the “Box office” column.\n box_office = wiki_movies_df['Box office'].dropna()\n \n # Convert the box office data created in Step 8 to string values using the lambda and join functions.\n box_office = box_office.apply(lambda x: ' '.join(x) if type(x) == list else x)\n box_office = box_office.str.replace(r'\\$.*[-—–](?![a-z])', '$', regex=True)\n\n # Write a regular expression to match the six elements of \"form_one\" of the box office data.\n form_one = r'\\$\\s*\\d+\\.?\\d*\\s*[mb]illi?on'\n # Write a regular expression to match the three elements of \"form_two\" of the box office data.\n form_two = r'\\$\\s*\\d{1,3}(?:[,\\.]\\d{3})+(?!\\s[mb]illion)'\n\n # Add the parse_dollars function.\n def parse_dollars(s):\n if type(s) != str:\n return np.nan\n\n if re.match(r'\\$\\s*\\d+\\.?\\d*\\s*milli?on', s, flags=re.IGNORECASE):\n s = re.sub('\\$|\\s|[a-zA-Z]','', s)\n value = float(s) * 10**6\n return value\n\n elif re.match(r'\\$\\s*\\d+\\.?\\d*\\s*billi?on', s, flags=re.IGNORECASE):\n s = re.sub('\\$|\\s|[a-zA-Z]','', s)\n value = float(s) * 10**9\n return value\n\n elif re.match(r'\\$\\s*\\d{1,3}(?:[,\\.]\\d{3})+(?!\\s[mb]illion)', s, flags=re.IGNORECASE):\n s = re.sub('\\$|,','', s)\n value = float(s)\n return value\n else:\n return np.nan\n \n # Clean the box office column in the wiki_movies_df DataFrame.\n wiki_movies_df['box_office'] = box_office.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)\n wiki_movies_df.drop('Box office', axis=1, inplace=True)\n \n # Clean the budget column in the wiki_movies_df DataFrame.\n budget = wiki_movies_df['Budget'].dropna()\n budget = budget.map(lambda x: ' '.join(x) if type(x) == list else x)\n budget = budget.str.replace(r'\\$.*[-—–](?![a-z])', '$', regex=True)\n budget = budget.str.replace(r'\\[\\d+\\]\\s*', '')\n wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)\n\n\n # Clean the release date column in the wiki_movies_df DataFrame.\n release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)\n date_form_one = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\\s[123]?\\d,\\s\\d{4}'\n date_form_two = r'\\d{4}.[01]\\d.[0123]\\d'\n date_form_three = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\\s\\d{4}'\n date_form_four = r'\\d{4}'\n wiki_movies_df['release_date'] = pd.to_datetime(release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})')[0], infer_datetime_format=True)\n\n # Clean the running time column in the wiki_movies_df DataFrame.\n running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)\n running_time_extract = 
running_time.str.extract(r'(\\d+)\\s*ho?u?r?s?\\s*(\\d*)|(\\d+)\\s*m')\n running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)\n wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1)\n wiki_movies_df.drop('Running time', axis=1, inplace=True)\n \n # 2. Clean the Kaggle metadata.\n kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult',axis='columns')\n kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'\n kaggle_metadata = kaggle_metadata[~kaggle_metadata['budget'].str.contains(r'/\\D')]\n kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)\n kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')\n kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')\n kaggle_metadata['release_date'] = pd.to_datetime(kaggle_metadata['release_date'])\n \n ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')\n \n # 3. Merged the two DataFrames into the movies DataFrame.\n movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki','_kaggle'])\n\n # 4. Drop unnecessary columns from the merged DataFrame.\n movies_df.drop(columns=['title_wiki','release_date_wiki','Language','Production company(s)'], inplace=True)\n\n # 5. Add in the function to fill in the missing Kaggle data.\n def fill_missing_kaggle_data(df, kaggle_column, wiki_column):\n df[kaggle_column] = df.apply(\n lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column]\n , axis=1)\n df.drop(columns=wiki_column, inplace=True)\n\n\n # 6. Call the function in Step 5 with the DataFrame and columns as the arguments.\n fill_missing_kaggle_data(movies_df, 'runtime', 'running_time')\n fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki')\n fill_missing_kaggle_data(movies_df, 'revenue', 'box_office')\n movies_df['video'].value_counts(dropna=False)\n\n # 7. Filter the movies DataFrame for specific columns.\n movies_df = movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',\n 'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',\n 'genres','original_language','overview','spoken_languages','Country',\n 'production_companies','production_countries','Distributor',\n 'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'\n ]]\n\n # 8. Rename the columns in the movies DataFrame.\n movies_df.rename({'id':'kaggle_id',\n 'title_kaggle':'title',\n 'url':'wikipedia_url',\n 'budget_kaggle':'budget',\n 'release_date_kaggle':'release_date',\n 'Country':'country',\n 'Distributor':'distributor',\n 'Producer(s)':'producers',\n 'Director':'director',\n 'Starring':'starring',\n 'Cinematography':'cinematography',\n 'Editor(s)':'editors',\n 'Writer(s)':'writers',\n 'Composer(s)':'composers',\n 'Based on':'based_on'\n }, axis='columns', inplace=True)\n\n # 9. 
Transform and merge the ratings DataFrame.\n rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count()\n rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \\\n .rename({'userId':'count'}, axis=1)\n rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \\\n .rename({'userId':'count'}, axis=1) \\\n .pivot(index='movieId',columns='rating', values='count')\n rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]\n movies_with_ratings_df = pd.merge(movies_df, rating_counts, left_on='kaggle_id', right_index=True, how='left')\n movies_with_ratings_df[rating_counts.columns] = movies_with_ratings_df[rating_counts.columns].fillna(0)\n \n db_string = f\"postgresql://postgres:{db_password}@127.0.0.1:5432/movie_data\"\n engine = create_engine(db_string)\n \n start_time_movies = time.time()\n movies_df.to_sql(name='movies', con=engine, if_exists='replace')\n print(f'Done movies. {time.time() - start_time_movies} total seconds elapsed')\n \n rows_imported = 0\n start_time = time.time()\n for data in pd.read_csv(f'./Resources/ratings.csv', chunksize=1000000):\n print(f'importing rows {rows_imported} to {rows_imported + len(data)}...', end='')\n data.to_sql(name='ratings', con=engine, if_exists='append')\n rows_imported += len(data)\n\n print(f'Done ratings. {time.time() - start_time} total seconds elapsed') ", "_____no_output_____" ], [ "# 10. Create the path to your file directory and variables for the three files.\nfile_dir = './Resources'\n# The Wikipedia data\nwiki_file = f'{file_dir}/wikipedia-movies.json'\n# The Kaggle metadata\nkaggle_file = f'{file_dir}/movies_metadata.csv'\n# The MovieLens rating data.\nratings_file = f'{file_dir}/ratings.csv'", "_____no_output_____" ], [ "# 11. Set the three variables equal to the function created in D1.\nextract_transform_load(wiki_file, kaggle_file, ratings_file)", "/home/adrian/anaconda3/envs/PythonData/lib/python3.7/site-packages/ipykernel_launcher.py:81: FutureWarning: The default value of regex will change from True to False in a future version.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d05ab6fb6420344707b02b9a0bfb9e25e157fd76
414,950
ipynb
Jupyter Notebook
jupyter/09_ellipsoid_fruit_fitting.ipynb
amezqui3/vitaminC_morphology
c2a59ce83248bbc6966dd63532cb466192ce0600
[ "MIT" ]
null
null
null
jupyter/09_ellipsoid_fruit_fitting.ipynb
amezqui3/vitaminC_morphology
c2a59ce83248bbc6966dd63532cb466192ce0600
[ "MIT" ]
null
null
null
jupyter/09_ellipsoid_fruit_fitting.ipynb
amezqui3/vitaminC_morphology
c2a59ce83248bbc6966dd63532cb466192ce0600
[ "MIT" ]
null
null
null
617.485119
237,608
0.942422
[ [ [ "<style type=\"text/css\">\n.tg {border-collapse:collapse;border-spacing:0;}\n.tg td{border-color:white;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;\n overflow:hidden;padding:10px 5px;word-break:normal;}\n.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;\n font-weight:normal;overflow:hidden;padding:10px 5px;word-break:normal;}\n.tg .tg-baqh{text-align:left;vertical-align:top}\n</style>\n\n# 09 Compute best-fit ellipsoid approximation of the whole fruit\n\nWe now go for macro modeling.\n\nFor each fruit, a point cloud, a collection of $(x,y,z)$ coordinates in the space, was defined by the centers of all its individual oil glands.\n- We compute an ellipsoid that fits the best this point cloud\n- To that end, we do just an ordinary least square fit to find the best coefficients of the respective quadratic equation that approximate most of the point cloud points.\n- The algebraic-fit ellipsoid was adapted from [Li and Griffiths (2004)](https://doi.org/10.1109/GMAP.2004.1290055). \n- This produces a 10-dimensional vector that algebraically defines an ellipsoid. \n - See [Panou et al. (2020)](https://doi.org/10.1515/jogs-2020-0105) on how to convert this vector into geometric parameters.\n\n<table class=\"tg\">\n\n<tbody>\n <tr>\n <td class=\"tg-baqh\" style=\"text-align:left\">\n <img src=\"https://www.egr.msu.edu/~amezqui3/citrus/figs/SW01_CRC3030_12B-8-5_L02_frontal_ell_projection.jpg\" style=\"width:500px\">\n <p style=\"text-align:center;font-size:20px\">Approximating a sweet orange</p>\n </td>\n <td class=\"tg-baqh\" style=\"text-align:left\">\n <img src = \"https://www.egr.msu.edu/~amezqui3/citrus/figs/SR01_CRC3289_12B-19-9_L02_frontal_ell_projection.jpg\" alt = \"barley\" style=\"width:500px;\"/>\n <p style=\"text-align:center;font-size:20px\">Approximating a sour orange</p>\n </td>\n </tr>\n</tbody>\n</table>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport glob\nimport os\n\nimport tifffile as tf\nfrom importlib import reload\n\nimport warnings\nwarnings.filterwarnings( \"ignore\")\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport citrus_utils as vitaminC", "_____no_output_____" ] ], [ [ "### Define the appropriate base/root name and label name\n\n- This is where having consistent file naming pays off", "_____no_output_____" ] ], [ [ "tissue_src = '../data/tissue/'\noil_src = '../data/oil/'\nbnames = [os.path.split(x)[-1] for x in sorted(glob.glob(oil_src + 'WR*'))]\nfor i in range(len(bnames)):\n print(i, '\\t', bnames[i])", "0 \t WR05_CRC3605_18B-19-5\n" ], [ "bname = bnames[0]\nL = 3\n\nlname = 'L{:02d}'.format(L)\nrotateby = [2,1,0]", "_____no_output_____" ] ], [ [ "### Load voxel-size data\n\n- The micron size of each voxel depends on the scanning parameters", "_____no_output_____" ] ], [ [ "voxel_filename = '../data/citrus_voxel_size.csv'\nvoxel_size = pd.read_csv(voxel_filename)\nvoxsize = (voxel_size.loc[voxel_size.ID == bname, 'voxel_size_microns'].values)[0]\nprint('Each voxel is of side', voxsize, 'microns')", "Each voxel is of side 57.5 microns\n" ] ], [ [ "## Load oil gland centers and align based on spine\n\n- From the previous step, retrieve the `vh` rotation matrix to align the fruit\n- The point cloud is made to have mean zero and it is scaled according to its voxel size\n- The scale now should be in cm\n- Plot 2D projections of the oil glands to make sure the fruit is standing upright after rotation", "_____no_output_____" 
] ], [ [ "savefig= False\nfilename = tissue_src + bname + '/' + lname + '/' + bname + '_' + lname + '_vh_alignment.csv'\nvh = np.loadtxt(filename, delimiter = ',')\nprint(vh)", "[[ 9.92111916e-01 -9.20178175e-02 8.51273630e-02]\n [-9.22832159e-02 -9.95732461e-01 -8.20533131e-04]\n [ 8.48395824e-02 -7.04176612e-03 -9.96369740e-01]]\n" ], [ "oil_dst = oil_src + bname + '/' + lname + '/'\nfilename = oil_dst + bname + '_' + lname + '_glands.csv'\nglands = np.loadtxt(filename, delimiter=',', dtype=float)\n\nglands = np.matmul(glands, np.transpose(vh))\ncenterby = np.mean(glands, axis = 0)\nscaleby = 1e4/voxsize\nglands = (glands - centerby)/scaleby\n\ndst = oil_src + bname + '/'\nvitaminC.plot_3Dprojections(glands, title=bname+'_'+lname, writefig=savefig, dst=dst)", "_____no_output_____" ] ], [ [ "# Compute the general conic parameters\n\nHere we follow the algorithm laid out by [Li and Griffiths (2004)](https://doi.org/10.1109/GMAP.2004.1290055). \n\n\nA general quadratic surface is defined by the equation\n\n$$\\eqalignno{ & ax^{2}+by^{2}+cz^{2}+2fxy+2gyz+2hzy\\ \\ \\ \\ \\ \\ \\ \\ \\ &\\hbox{(1)}\\cr &+2px+2qy+2rz+d=0.}$$\n\nLet $$\\rho = \\frac{4J-I}{a^2 + b^2 + c^2},$$\n\n$$\\eqalignno{ &I = a+b+c &\\hbox{(2)}\\cr &J =ab+bc+ac-f^{2}-g^{2}-h^{2}&\\hbox {(3)}\\cr & K=\\left[\\matrix{ a & h & g \\cr h & b & f \\cr g & f & c }\\right] &\\hbox{(4)}}.$$\n\nThese values are invariant under rotation and translation and equation (1) represents an ellipsoid if $J > 0$ and $IK>0$.\n\nWith our observations $\\{(x_i,y_i,z_i)\\}_i$, we would ideally want a vector of parameters $(a,b,c,f,g,h,p,q,r,d)$ such that\n\n$$\n\\begin{pmatrix}\nx_1^2 & y_1^2 & z_1^2 & 2x_1y_1 & 2y_1z_1 & 2x_1z_1 & x_1 & y_1 & z_1 & 1\\\\\nx_2^2 & y_2^2 & z_2^2 & 2x_2y_2 & 2y_2z_2 & 2x_2z_2 & x_2 & y_2 & z_2 & 1\\\\\n\\vdots& \\vdots& \\vdots& \\vdots & \\vdots & \\vdots & \\vdots & \\vdots & \\vdots \\\\\nx_n^2 & y_n^2 & z_n^2 & 2x_ny_n & 2y_nz_n & 2x_nz_n & x_n & y_n & z_n & 1\n\\end{pmatrix}\n\\begin{pmatrix}\na \\\\ b \\\\ \\vdots \\\\ d\n\\end{pmatrix}\n=\n\\begin{pmatrix}\n0 \\\\ 0 \\\\ \\vdots \\\\ 0\n\\end{pmatrix}\n$$\nor\n$$\n\\mathbf{D}\\mathbf{v} = 0\n$$\n\nThe solution to the system above can be obtained via Lagrange multipliers\n\n$$\\min_{\\mathbf{v}\\in\\mathbb{R}^{10}}\\left\\|\\mathbf{D}\\mathbf{v}\\right\\|^2, \\quad \\mathrm{s.t.}\\; kJ - I^2 = 1$$\n\nIf $k=4$, the resulting vector $\\mathbf{v}$ is guaranteed to be an ellipsoid. \n\n- Experimental results suggest that the optimization problem also yields ellipsoids for higher $k$'s if there are enough sample points.\n\n---\n\n- This whole procedure yields a 10-dimensional vector $(a,b,c,f,g,h,p,q,r,1)$, which is then translated to geometric parameters as shown in [Panou et al. 
(2020)](https://doi.org/10.1515/jogs-2020-0105)\n\nWe obtain finally a `6 x 3` matrix with all the geometric parameters\n```\n[ x,y,z coordinates of ellipsoid center ]\n[ semi-axes lengths ]\n[ | ]\n[ -- 3 x 3 rotation matrix -- ]\n[ | ]\n[ x,y,z rotation angles ]\n```", "_____no_output_____" ] ], [ [ "np.vstack(tuple(ell_params.values())).shape", "_____no_output_____" ], [ "bbox = (np.max(glands, axis=0) - np.min(glands, axis=0))*.5\nguess = np.argsort(np.argsort(bbox))\nprint(bbox)\nprint(guess[rotateby])\nbbox[rotateby]", "[1.05911753 1.08946973 1.09470996]\n[2 1 0]\n" ], [ "datapoints = glands.T\n\nfilename = oil_src + bname + '/' + lname + '/' + bname + '_' + lname + '_vox_v_ell.csv'\nell_v_params, flag = vitaminC.ell_algebraic_fit2(datapoints, k=4)\nprint(np.around(ell_v_params,3), '\\n', flag, 'ellipsoid\\n')\nnp.savetxt(filename, ell_v_params, delimiter=',')\n\nfilename = oil_src + bname + '/' + lname + '/' + bname + '_' + lname + '_vox_m_ell.csv'\nell_params = vitaminC.get_ell_params_from_vector(ell_v_params, guess[rotateby])\nnp.savetxt(filename, np.vstack(tuple(ell_params.values())), delimiter=',')\n \nell_params", "[ 0.953 0.879 0.883 0.027 -0.036 -0.006 0.038 -0.043 0.072 -1. ] \n True ellipsoid\n\n" ], [ "oil_src + bname + '/' + lname + '/' + bname + '_' + lname + '_ell_m.csv'", "_____no_output_____" ] ], [ [ "## Project the oil gland centers to the best-fit ellipsoid\n\n- The oil gland point cloud is translated to the center of the best-fit ellipsoid.\n- Projection will be **geocentric**: trace a ray from the origin to the oil gland and see where it intercepts the ellipsoid.\n\nAdditionally, we can compute these projection in terms of geodetic coordinates:\n- longitude $\\lambda\\in[-\\pi,\\pi]$\n- latitude $\\phi\\in[-\\frac\\pi2,\\frac\\pi2]$ \n- See [Diaz-Toca _et al._ (2020)](https://doi.org/10.1016/j.cageo.2020.104551) for more details.\n\nThe geodetic coordinates are invariant with respect to ellipsoid size, as long as the ratio between its semi-major axes lengths remains constant.\n\n- These geodetic coordinates are a natural way to translate our data to the sphere\n- Later, it will allow us to draw machinery from directional statistics ([Pewsey and García-Portugués, 2021](https://doi.org/10.1007/s11749-021-00759-x)).\n\nResults are saved in a `N x 3` matrix, where `N` is the number of individual oil glands\n- Each row of the matrix is\n```\n[ longitude latitude residue ]\n```\n- The residue is the perpendicular distance from the oil gland to the ellipsoid surface.", "_____no_output_____" ] ], [ [ "footpoints = 'geocentric'\n\n_, xyz = vitaminC.get_footpoints(datapoints, ell_params, footpoints)\nrho = vitaminC.ell_rho(ell_params['axes'])\nprint(rho)\n\neglands = xyz - ell_params['center'].reshape(-1,1)\neglands = eglands[rotateby]\ncglands = datapoints - ell_params['center'].reshape(-1,1)\ncglands = cglands[rotateby]\n\neglands_params = {'center': np.zeros(len(eglands)),\n 'axes': ell_params['axes'],\n 'rotation': np.identity(len(eglands))}\ngeodetic, _ = vitaminC.get_footpoints(eglands, eglands_params, footpoints)\n\nfilename = oil_dst + bname + '_' + lname + '_' + footpoints + '.csv'\nnp.savetxt(filename, geodetic.T, delimiter=',')\nprint('Saved', filename)", "0.9944746932348365\nSaved ../data/oil/WR05_CRC3605_18B-19-5/L03/WR05_CRC3605_18B-19-5_L03_geocentric.csv\n" ], [ "pd.DataFrame(geodetic.T).describe()", "_____no_output_____" ] ], [ [ "### Plot the best-fit ellipsoid sphere and the gland projections\n\n- Visual sanity check", "_____no_output_____" ] ], [ [ 
"domain_lon = [-np.pi, np.pi]\ndomain_lat = [-.5*np.pi, 0.5*np.pi]\nlonN = 25\nlatN = 25\n\nlongitude = np.linspace(*domain_lon, lonN)\nlatitude = np.linspace(*domain_lat, latN)\n\nshape_lon, shape_lat = np.meshgrid(longitude, latitude)\nlonlat = np.vstack((np.ravel(shape_lon), np.ravel(shape_lat)))\n\necoords = vitaminC.ellipsoid(*(lonlat), *ell_params['axes'])", "_____no_output_____" ], [ "title = bname + '_' + lname + ' - ' + footpoints.title() + ' projection'\nmarkersize = 2\nsidestep = np.min(bbox)\nalpha = .5\nfs = 20\n\nfilename = oil_dst + '_'.join(np.array(title.split(' '))[[0,2]])\n\nvitaminC.plot_ell_comparison(cglands, eglands, ecoords, title, sidestep, savefig=savefig, filename=filename)", "_____no_output_____" ] ], [ [ "# References\n\n- **Diaz-Toca, GM**, **Marin, L**, **Necula, I** (2020) Direct transformation from Cartesian into geodetic coordinates on a triaxial ellipsoid. _Computers & Geosciences_ **142**, 104551. [DOI: 10.1016/j.cageo.2020.104551](https://doi.org/10.1016/j.cageo.2020.104551)\n\n- **Li, Q**, **Griffiths, J** (2004) Least squares ellipsoid specific fitting. _Geometric Modeling and Processing. Proceedings, 2004_. 335-340. [DOI: 10.1109/GMAP.2004.1290055](https://doi.org/10.1109/GMAP.2004.1290055)\n\n- **Panou, G**, **Korakitis, R**, **Pantazis, G** (2020) Fitting a triaxial ellipsoid to a geoid model. _Journal of Geodetic Science_ **10**(1), 69-82. [DOI: 10.1515/jogs-2020-0105](https://doi.org/10.1515/jogs-2020-0105)\n\n- **Pewsey, A**, **García-Portugués, E** (2021) Recent advances in directional statistics. _TEST_ **30**(1), 1-58 [DOI: 10.1007/s11749-021-00759-x](https://doi.org/10.1007/s11749-021-00759-x)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d05ab79232366a62af1958fa06a7ff32ccae228c
104,728
ipynb
Jupyter Notebook
notebooks/end-to-end-structured/labs/.ipynb_checkpoints/4c_keras_wide_and_deep_babyweight-checkpoint.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
notebooks/end-to-end-structured/labs/.ipynb_checkpoints/4c_keras_wide_and_deep_babyweight-checkpoint.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
notebooks/end-to-end-structured/labs/.ipynb_checkpoints/4c_keras_wide_and_deep_babyweight-checkpoint.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
129.293827
38,836
0.84497
[ [ [ "# LAB 4c: Create Keras Wide and Deep model.\n\n**Learning Objectives**\n\n1. Set CSV Columns, label column, and column defaults\n1. Make dataset of features and label from CSV files\n1. Create input layers for raw features\n1. Create feature columns for inputs\n1. Create wide layer, deep dense hidden layers, and output layer\n1. Create custom evaluation metric\n1. Build wide and deep model tying all of the pieces together\n1. Train and evaluate\n\n\n## Introduction \nIn this notebook, we'll be using Keras to create a wide and deep model to predict the weight of a baby before it is born.\n\nWe'll start by defining the CSV column names, label column, and column defaults for our data inputs. Then, we'll construct a tf.data Dataset of features and the label from the CSV files and create inputs layers for the raw features. Next, we'll set up feature columns for the model inputs and build a wide and deep neural network in Keras. We'll create a custom evaluation metric and build our wide and deep model. Finally, we'll train and evaluate our model.\n\nEach learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/4c_keras_wide_and_deep_babyweight.ipynb).", "_____no_output_____" ], [ "## Load necessary libraries", "_____no_output_____" ] ], [ [ "import datetime\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nprint(tf.__version__)", "2.1.1\n" ] ], [ [ "## Verify CSV files exist\n\nIn the seventh lab of this series [4a_sample_babyweight](../solutions/4a_sample_babyweight.ipynb), we sampled from BigQuery our train, eval, and test CSV files. Verify that they exist, otherwise go back to that lab and create them.", "_____no_output_____" ] ], [ [ "%%bash\nls *.csv", "eval.csv\ntest.csv\ntrain.csv\n" ], [ "%%bash\nhead -5 *.csv", "==> eval.csv <==\n6.3118345610599995,Unknown,35,Single(1),38\n5.43659938092,Unknown,21,Multiple(2+),35\n7.43839671988,Unknown,20,Single(1),40\n6.37576861704,Unknown,27,Multiple(2+),34\n7.62358501996,True,30,Single(1),38\n\n==> test.csv <==\n6.9996768185,Unknown,20,Single(1),39\n6.9996768185,Unknown,26,Single(1),37\n7.93443680938,Unknown,25,Single(1),39\n5.5005334369,Unknown,35,Single(1),34\n7.31052860792,Unknown,26,Single(1),39\n\n==> train.csv <==\n6.1883756943399995,Unknown,24,Single(1),38\n9.39389698382,Unknown,25,Single(1),38\n7.81318256528,Unknown,32,Single(1),41\n6.75055446244,True,21,Single(1),36\n8.12623897732,True,34,Single(1),40\n" ] ], [ [ "## Create Keras model", "_____no_output_____" ], [ "### Lab Task #1: Set CSV Columns, label column, and column defaults.\n\nNow that we have verified that our CSV files exist, we need to set a few things that we will be using in our input function.\n* `CSV_COLUMNS` are going to be our header names of our columns. Make sure that they are in the same order as in the CSV files\n* `LABEL_COLUMN` is the header name of the column that is our label. We will need to know this to pop it from our features dictionary.\n* `DEFAULTS` is a list with the same length as `CSV_COLUMNS`, i.e. there is a default for each column in our CSVs. 
Each element is a list itself with the default value for that CSV column.", "_____no_output_____" ] ], [ [ "# Determine CSV, label, and key columns\n# TODO: Create list of string column headers, make sure order matches.\nCSV_COLUMNS = [\"weight_pounds\", \"is_male\", \"mother_age\", \"plurality\", \"gestation_weeks\"]\n\n# TODO: Add string name for label column\nLABEL_COLUMN = \"weight_pounds\"\n\n# Set default values for each CSV column as a list of lists.\n# Treat is_male and plurality as strings.\nDEFAULTS = [[0.0], [\"null\"], [0.0], [\"null\"], [0.0]]", "_____no_output_____" ] ], [ [ "### Lab Task #2: Make dataset of features and label from CSV files.\n\nNext, we will write an input_fn to read the data. Since we are reading from CSV files we can save ourself from trying to recreate the wheel and can use `tf.data.experimental.make_csv_dataset`. This will create a CSV dataset object. However we will need to divide the columns up into features and a label. We can do this by applying the map method to our dataset and popping our label column off of our dictionary of feature tensors.", "_____no_output_____" ] ], [ [ "def features_and_labels(row_data):\n \"\"\"Splits features and labels from feature dictionary.\n\n Args:\n row_data: Dictionary of CSV column names and tensor values.\n Returns:\n Dictionary of feature tensors and label tensor.\n \"\"\"\n label = row_data.pop(LABEL_COLUMN)\n\n return row_data, label # features, label\n\n\ndef load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):\n \"\"\"Loads dataset using the tf.data API from CSV files.\n\n Args:\n pattern: str, file pattern to glob into list of files.\n batch_size: int, the number of examples per batch.\n mode: tf.estimator.ModeKeys to determine if training or evaluating.\n Returns:\n `Dataset` object.\n \"\"\"\n # TODO: Make a CSV dataset\n dataset = tf.data.experimental.make_csv_dataset(\n file_pattern=pattern,\n batch_size=batch_size,\n column_names=CSV_COLUMNS,\n column_defaults=DEFAULTS)\n\n # TODO: Map dataset to features and label\n dataset = dataset.map(map_func=features_and_labels) # features, label\n\n # Shuffle and repeat for training\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=1000).repeat()\n\n # Take advantage of multi-threading; 1=AUTOTUNE\n dataset = dataset.prefetch(buffer_size=1)\n\n return dataset", "_____no_output_____" ] ], [ [ "### Lab Task #3: Create input layers for raw features.\n\nWe'll need to get the data read in by our input function to our model function, but just how do we go about connecting the dots? We can use Keras input layers [(tf.Keras.layers.Input)](https://www.tensorflow.org/api_docs/python/tf/keras/Input) by defining:\n* shape: A shape tuple (integers), not including the batch size. For instance, shape=(32,) indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known.\n* name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). 
It will be autogenerated if it isn't provided.\n* dtype: The data type expected by the input, as a string (float32, float64, int32...)", "_____no_output_____" ] ], [ [ "def create_input_layers():\n \"\"\"Creates dictionary of input layers for each feature.\n\n Returns:\n Dictionary of `tf.Keras.layers.Input` layers for each feature.\n \"\"\"\n # TODO: Create dictionary of tf.keras.layers.Input for each dense feature\n deep_inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"float32\")\n for colname in [\"mother_age\", \"gestation_weeks\"]}\n\n # TODO: Create dictionary of tf.keras.layers.Input for each sparse feature\n wide_inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"string\")\n for colname in [\"is_male\", \"plurality\"]}\n\n inputs = {**wide_inputs, **deep_inputs}\n\n return inputs", "_____no_output_____" ] ], [ [ "### Lab Task #4: Create feature columns for inputs.\n\nNext, define the feature columns. `mother_age` and `gestation_weeks` should be numeric. The others, `is_male` and `plurality`, should be categorical. Remember, only dense feature columns can be inputs to a DNN.", "_____no_output_____" ] ], [ [ "def categorical_fc(name, values):\n \"\"\"Helper function to wrap categorical feature by indicator column.\n\n Args:\n name: str, name of feature.\n values: list, list of strings of categorical values.\n Returns:\n Categorical and indicator column of categorical feature.\n \"\"\"\n cat_column = tf.feature_column.categorical_column_with_vocabulary_list(\n key=name, vocabulary_list=values)\n ind_column = tf.feature_column.indicator_column(\n categorical_column=cat_column)\n\n return cat_column, ind_column\n\ndef create_feature_columns(nembeds):\n \"\"\"Creates wide and deep dictionaries of feature columns from inputs.\n\n Args:\n nembeds: int, number of dimensions to embed categorical column down to.\n Returns:\n Wide and deep dictionaries of feature columns.\n \"\"\"\n # TODO: Create deep feature columns for numeric features\n deep_fc = {\n colname: tf.feature_column.numeric_column(key=colname)\n for colname in [\"mother_age\", \"gestation_weeks\"]\n }\n\n # TODO: Create wide feature columns for categorical features\n wide_fc = {}\n is_male, wide_fc[\"is_male\"] = categorical_fc(\n \"is_male\", [\"True\", \"False\", \"Unknown\"])\n plurality, wide_fc[\"plurality\"] = categorical_fc(\n \"plurality\", [\"Single(1)\", \"Twins(2)\", \"Triplets(3)\", \"Quadruplets(4)\", \"Quintuplets(5)\", \"Multiple(2+)\"])\n \n # TODO: Bucketize the float fields. 
This makes them wide\n age_buckets = tf.feature_column.bucketized_column(\n source_column=deep_fc[\"mother_age\"],\n boundaries=np.arange(15, 45, 1).tolist())\n wide_fc[\"age_buckets\"] = tf.feature_column.indicator_column(\n categorical_column=age_buckets)\n \n gestation_buckets = tf.feature_column.bucketized_column(\n source_column=deep_fc[\"gestation_weeks\"],\n boundaries=np.arange(17, 47, 1).tolist())\n wide_fc[\"gestation_buckets\"] = tf.feature_column.indicator_column(\n categorical_column=gestation_buckets)\n\n # TODO: Cross all the wide cols, have to do the crossing before we one-hot\n crossed = tf.feature_column.crossed_column(\n keys=[age_buckets, gestation_buckets],\n hash_bucket_size=1000)\n\n # TODO: Embed cross and add to deep feature columns\n deep_fc[\"crosssed_embeds\"] = tf.feature_column.embedding_column(\n categorical_column=crossed, dimension=nembeds)\n\n return wide_fc, deep_fc", "_____no_output_____" ] ], [ [ "### Lab Task #5: Create wide and deep model and output layer.\n\nSo we've figured out how to get our inputs ready for machine learning but now we need to connect them to our desired output. Our model architecture is what links the two together. We need to create a wide and deep model now. The wide side will just be a linear regression or dense layer. For the deep side, let's create some hidden dense layers. All of this will end with a single dense output layer. This is regression so make sure the output layer activation is correct and that the shape is right.", "_____no_output_____" ] ], [ [ "def get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units):\n \"\"\"Creates model architecture and returns outputs.\n\n Args:\n wide_inputs: Dense tensor used as inputs to wide side of model.\n deep_inputs: Dense tensor used as inputs to deep side of model.\n dnn_hidden_units: List of integers where length is number of hidden\n layers and ith element is the number of neurons at ith layer.\n Returns:\n Dense tensor output from the model.\n \"\"\"\n # Hidden layers for the deep side\n layers = [int(x) for x in dnn_hidden_units]\n deep = deep_inputs\n\n # TODO: Create DNN model for the deep side\n for layerno, numnodes in enumerate(layers):\n deep = tf.keras.layers.Dense(\n units=numnodes,\n activation=\"relu\",\n name=\"dnn_{}\".format(layerno+1))(deep)\n deep_out = deep\n\n # TODO: Create linear model for the wide side\n wide_out = tf.keras.layers.Dense(\n units=10, activation=\"relu\", name=\"linear\")(wide_inputs)\n\n # Concatenate the two sides\n both = tf.keras.layers.concatenate(\n inputs=[deep_out, wide_out], name=\"both\")\n\n # TODO: Create final output layer\n output=tf.keras.layers.Dense(\n units=1, activation=\"linear\", name=\"weight\")(both)\n\n return output", "_____no_output_____" ] ], [ [ "### Lab Task #6: Create custom evaluation metric.\n\nWe want to make sure that we have some useful way to measure model performance for us. 
Since this is regression, we would like to know the RMSE of the model on our evaluation dataset, however, this does not exist as a standard evaluation metric, so we'll have to create our own by using the true and predicted labels.", "_____no_output_____" ] ], [ [ "def rmse(y_true, y_pred):\n \"\"\"Calculates RMSE evaluation metric.\n\n Args:\n y_true: tensor, true labels.\n y_pred: tensor, predicted labels.\n Returns:\n Tensor with value of RMSE between true and predicted labels.\n \"\"\"\n # TODO: Calculate RMSE from true and predicted labels\n return tf.sqrt(tf.reduce_mean((y_pred-y_true)**2))", "_____no_output_____" ] ], [ [ "### Lab Task #7: Build wide and deep model tying all of the pieces together.\n\nExcellent! We've assembled all of the pieces, now we just need to tie them all together into a Keras Model. This is NOT a simple feedforward model with no branching, side inputs, etc. so we can't use Keras' Sequential Model API. We're instead going to use Keras' Functional Model API. Here we will build the model using [tf.keras.models.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics.", "_____no_output_____" ] ], [ [ "def build_wide_deep_model(dnn_hidden_units=[64, 32], nembeds=3):\n \"\"\"Builds wide and deep model using Keras Functional API.\n\n Returns:\n `tf.keras.models.Model` object.\n \"\"\"\n # Create input layers\n inputs = create_input_layers()\n\n # Create feature columns\n wide_fc, deep_fc = create_feature_columns(nembeds)\n\n # The constructor for DenseFeatures takes a list of numeric columns\n # The Functional API in Keras requires: LayerConstructor()(inputs)\n\n # TODO: Add wide and deep feature colummns\n wide_inputs = tf.keras.layers.DenseFeatures(\n feature_columns=wide_fc.values(), name=\"wide_inputs\")(inputs)\n deep_inputs = tf.keras.layers.DenseFeatures(\n feature_columns=deep_fc.values(), name=\"deep_inputs\")(inputs)\n\n # Get output of model given inputs\n output = get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units)\n\n # Build model and compile it all together\n model = tf.keras.models.Model(inputs=inputs, outputs=output)\n\n # TODO: Add custom eval metrics to list\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"mse\", rmse])\n\n return model\n\nprint(\"Here is our wide and deep architecture so far:\\n\")\nmodel = build_wide_deep_model()\nprint(model.summary())", "Here is our wide and deep architecture so far:\n\nModel: \"model_1\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ngestation_weeks (InputLayer) [(None,)] 0 \n__________________________________________________________________________________________________\nis_male (InputLayer) [(None,)] 0 \n__________________________________________________________________________________________________\nmother_age (InputLayer) [(None,)] 0 \n__________________________________________________________________________________________________\nplurality (InputLayer) [(None,)] 0 \n__________________________________________________________________________________________________\ndeep_inputs (DenseFeatures) (None, 5) 3000 gestation_weeks[0][0] \n is_male[0][0] \n mother_age[0][0] \n plurality[0][0] 
\n__________________________________________________________________________________________________\ndnn_1 (Dense) (None, 64) 384 deep_inputs[0][0] \n__________________________________________________________________________________________________\nwide_inputs (DenseFeatures) (None, 71) 0 gestation_weeks[0][0] \n is_male[0][0] \n mother_age[0][0] \n plurality[0][0] \n__________________________________________________________________________________________________\ndnn_2 (Dense) (None, 32) 2080 dnn_1[0][0] \n__________________________________________________________________________________________________\nlinear (Dense) (None, 10) 720 wide_inputs[0][0] \n__________________________________________________________________________________________________\nboth (Concatenate) (None, 42) 0 dnn_2[0][0] \n linear[0][0] \n__________________________________________________________________________________________________\nweight (Dense) (None, 1) 43 both[0][0] \n==================================================================================================\nTotal params: 6,227\nTrainable params: 6,227\nNon-trainable params: 0\n__________________________________________________________________________________________________\nNone\n" ] ], [ [ "We can visualize the wide and deep network using the Keras plot_model utility.", "_____no_output_____" ] ], [ [ "tf.keras.utils.plot_model(\n model=model, to_file=\"wd_model.png\", show_shapes=False, rankdir=\"LR\")", "_____no_output_____" ] ], [ [ "## Run and evaluate model", "_____no_output_____" ], [ "### Lab Task #8: Train and evaluate.\n\nWe've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data. 
Also, don't forget to add the callback to TensorBoard.", "_____no_output_____" ] ], [ [ "TRAIN_BATCH_SIZE = 32\nNUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around\nNUM_EVALS = 5 # how many times to evaluate\n# Enough to get a reasonable sample, but not so much that it slows down\nNUM_EVAL_EXAMPLES = 10000\n\n# TODO: Load training dataset\ntrainds = load_dataset(\n pattern=\"train*\",\n batch_size=TRAIN_BATCH_SIZE,\n mode=tf.estimator.ModeKeys.TRAIN)\n\n# TODO: Load evaluation dataset\nevalds = load_dataset(\n pattern=\"eval*\",\n batch_size=1000,\n mode=tf.estimator.ModeKeys.EVAL).take(count=NUM_EVAL_EXAMPLES // 1000)\n\nsteps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)\n\nlogdir = os.path.join(\n \"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=logdir, histogram_freq=1)\n\n# TODO: Fit model on training dataset and evaluate every so often\nhistory = model.fit(\n trainds,\n validation_data=evalds,\n epochs=NUM_EVALS,\n steps_per_epoch=steps_per_epoch,\n callbacks=[tensorboard_callback])", "Train for 312 steps, validate for 10 steps\nEpoch 1/5\n312/312 [==============================] - 5s 15ms/step - loss: 1.8696 - mse: 1.8696 - rmse: 1.2285 - val_loss: 1.2763 - val_mse: 1.2763 - val_rmse: 1.1294\nEpoch 2/5\n312/312 [==============================] - 3s 8ms/step - loss: 1.1673 - mse: 1.1673 - rmse: 1.0681 - val_loss: 1.0742 - val_mse: 1.0742 - val_rmse: 1.0363\nEpoch 3/5\n312/312 [==============================] - 3s 8ms/step - loss: 1.0982 - mse: 1.0982 - rmse: 1.0377 - val_loss: 1.1666 - val_mse: 1.1666 - val_rmse: 1.0801\nEpoch 4/5\n312/312 [==============================] - 3s 8ms/step - loss: 1.1077 - mse: 1.1077 - rmse: 1.0439 - val_loss: 1.0761 - val_mse: 1.0761 - val_rmse: 1.0371\nEpoch 5/5\n312/312 [==============================] - 3s 9ms/step - loss: 1.0656 - mse: 1.0656 - rmse: 1.0212 - val_loss: 1.3020 - val_mse: 1.3020 - val_rmse: 1.1409\n" ] ], [ [ "### Visualize loss curve", "_____no_output_____" ] ], [ [ "# Plot\nnrows = 1\nncols = 2\nfig = plt.figure(figsize=(10, 5))\n\nfor idx, key in enumerate([\"loss\", \"rmse\"]):\n ax = fig.add_subplot(nrows, ncols, idx+1)\n plt.plot(history.history[key])\n plt.plot(history.history[\"val_{}\".format(key)])\n plt.title(\"model {}\".format(key))\n plt.ylabel(key)\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"validation\"], loc=\"upper left\");", "_____no_output_____" ] ], [ [ "### Save the model", "_____no_output_____" ] ], [ [ "OUTPUT_DIR = \"babyweight_trained_wd\"\nshutil.rmtree(OUTPUT_DIR, ignore_errors=True)\nEXPORT_PATH = os.path.join(\n OUTPUT_DIR, datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\ntf.saved_model.save(\n obj=model, export_dir=EXPORT_PATH) # with default serving function\nprint(\"Exported trained model to {}\".format(EXPORT_PATH))", "WARNING:tensorflow:From /opt/conda/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nINFO:tensorflow:Assets written to: babyweight_trained_wd/20201209174319/assets\nExported trained model to babyweight_trained_wd/20201209174319\n" ], [ "!ls $EXPORT_PATH", "assets\tsaved_model.pb\tvariables\n" ] ], [ [ "## Lab Summary: \nIn this lab, we started by defining the CSV column names, 
label column, and column defaults for our data inputs. Then, we constructed a tf.data Dataset of features and the label from the CSV files and created inputs layers for the raw features. Next, we set up feature columns for the model inputs and built a wide and deep neural network in Keras. We created a custom evaluation metric and built our wide and deep model. Finally, we trained and evaluated our model.", "_____no_output_____" ], [ "Copyright 2020 Google LLC\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n https://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d05ac57cfe518c27e3172019cac4b392b468b25e
38,923
ipynb
Jupyter Notebook
experiemnt1.ipynb
evinus/My-appproch-One
73783611478c3eb5863d9382728a4f10e0773140
[ "MIT" ]
null
null
null
experiemnt1.ipynb
evinus/My-appproch-One
73783611478c3eb5863d9382728a4f10e0773140
[ "MIT" ]
null
null
null
experiemnt1.ipynb
evinus/My-appproch-One
73783611478c3eb5863d9382728a4f10e0773140
[ "MIT" ]
null
null
null
63.703764
19,546
0.766822
[ [ [ "import cv2\nbild = cv2.imread(\"data\\ped2//training//frames//01//000.jpg\")\nbild2 = cv2.imread(\"data\\ped2//training//frames//01//001.jpg\")\nimport numpy as np\n\nlista = list()\nlista.append(bild)\nlista.append(bild2)\n\nlista = np.array(lista)", "_____no_output_____" ], [ "import cv2\nimport os\nimport numpy as np\nbilder = list()\nfor folder in os.listdir(\"data//avenue//testing//frames\"):\n path = os.path.join(\"data//avenue//testing//frames\",folder)\n for img in os.listdir(path):\n bild = os.path.join(path,img)\n #bilder.append(cv2.imread(bild))\n bilder.append(bild)\n\n#bilder = np.array(bilder)\n ", "_____no_output_____" ], [ "labels = np.load(\"data/frame_labels_ped2_2.npy\")\n#labels = np.reshape(labels,labels.shape[1])", "_____no_output_____" ], [ "import pandas as pd\nfjant = pd.DataFrame(data={\"x_col\":bilder,\"y_col\":labels})#columns=([\"x_col\",\"y_col\"]))\nfjant[\"y_col\"] = fjant[\"y_col\"].astype(str)", "_____no_output_____" ], [ "from keras_preprocessing.image import ImageDataGenerator\n\ndataget = ImageDataGenerator(rescale=1. / 255)\ntrain_get = dataget.flow_from_dataframe(dataframe=fjant,x_col=\"x_col\",y_col=\"y_col\",class_mode=\"sparse\",target_size=(360,240),batch_size=64)", "_____no_output_____" ] ], [ [ "__________________________", "_____no_output_____" ] ], [ [ "import numpy as np\nlabels = np.load(\"data/frame_labels_avenue.npy\")\nlabels = np.reshape(labels,labels.shape[1])", "_____no_output_____" ], [ "noll = 0\nett = 0\nfor x in Y_test:\n if x == 0:\n noll += 1\n else:\n ett +=1\nprint(\"Noll: \",noll)\nprint(\"Ett: \",ett)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(bilder,labels,test_size=0.2, random_state= 10)", "_____no_output_____" ], [ "#nylabels = np.concatenate((labels,nollor))\nnp.save(\"data/frame_labels_ped2_2.npy\",nylabels)", "_____no_output_____" ], [ "bilder = bilder.reshape(bilder.shape[0],bilder.shape[1],bilder.shape[2],bilder.shape[3],1)", "_____no_output_____" ], [ "from sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\nbilder = scaler.fit_transform(bilder)", "_____no_output_____" ], [ "output = np.full((2550,1),0)", "_____no_output_____" ], [ "ett = bilder[0,:,:,:]", "_____no_output_____" ], [ "import tensorflow.keras as keras\nbatch_size = 4\nmodel = keras.Sequential()\ninputs = keras.Input((240, 360, 3, 1))\n#model.add(keras.layers.Conv3D(input_shape = ,activation=\"relu\",filters=64,kernel_size=3,padding=\"same\"))\nmodel.add(keras.layers.Conv3D(activation=\"relu\",filters=64,kernel_size=3,padding=\"same\"))(inputs)\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\nmodel.add(keras.layers.BatchNormalization())\n\nmodel.add(keras.layers.Conv3D(activation=\"relu\",filters=64,kernel_size=3,padding=\"same\"))\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\n\nmodel.add(keras.layers.Conv3D(activation=\"relu\",filters=128,kernel_size=3,padding=\"same\"))\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\n\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(128,activation=\"relu\"))\nmodel.add(keras.layers.Dense(64,activation=\"relu\"))\nmodel.add(keras.layers.Dense(10,activation=\"relu\"))\nmodel.add(keras.layers.Dense(1,activation=\"sigmoid\"))\n\nmodel.compile(optimizer=\"adam\",metrics=keras.metrics.categorical_crossentropy)\nmodel.summary()", "_____no_output_____" ], [ "model = keras.Sequential()\n\nmodel.add(keras.layers.Conv3D(input_shape =(240, 360, 3, 
1),activation=\"relu\",filters=64,kernel_size=3,padding=\"same\"))\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\nmodel.add(keras.layers.BatchNormalization())\n\nmodel.add(keras.layers.Conv3D(activation=\"relu\",filters=128,kernel_size=3,padding=\"same\"))\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\n\nmodel.add(keras.layers.Conv3D(activation=\"relu\",filters=128,kernel_size=2,padding=\"same\"))\nmodel.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))\nmodel.add(keras.layers.Dense(64,activation=\"relu\"))\n#model.add(keras.layers.GlobalAveragePooling3D())\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(256,activation=\"relu\"))\nmodel.add(keras.layers.Dense(64,activation=\"relu\"))\nmodel.add(keras.layers.Dense(10,activation=\"relu\"))\nmodel.add(keras.layers.Dense(1,activation=\"sigmoid\"))", "_____no_output_____" ], [ "from tensorflow.keras.layers import Dense,Conv3D,MaxPooling3D,BatchNormalization,Flatten,Input, Add\nfrom tensorflow.keras.models import Model\ninput = Input((240,360,3,1))\n\nx = Conv3D(64,3,padding=\"same\")(input)\nx = MaxPooling3D(pool_size=(3,3,3))(x)\nx = Flatten()(x)\nx = Dense(128)(x)\n\n#y = Dense(128)(input)\ny = Flatten()(input)\ny = Dense(128)(y)\ny = Dense(128)(y)\nx = Add()([x,y])\nx = Dense(10)(x)\nx = Dense(1)(x)\n\nmodel = Model(inputs = input,outputs = x)\nmodel.compile()\nmodel.summary()\nfrom tensorflow.keras.utils import plot_model\nplot_model(model,show_shapes=True)\n", "_____no_output_____" ], [ "from tensorflow.keras.utils import plot_model\nplot_model(model,show_shapes=True)", "_____no_output_____" ], [ "with open('data//UCFCrime2Local//UCFCrime2Local//Train_split_AD.txt') as f:\n lines = f.readlines()", "_____no_output_____" ], [ "import cv2 \nimport numpy as np \nimport os\nfrom pathlib import *\n\npath = \"data/UFC\"\n\nfilms = list()\nfiles = (x for x in Path(path).iterdir() if x.is_file())\nfor file in files:\n #print(str(file.name).split(\"_\")[0], \"is a file!\")\n films.append(str(file.name).split(\"_\")[0])\n ", "_____no_output_____" ], [ "for x in range(len(lines)):\n if lines[x].strip() != films[x]:\n print(lines[x])\n break", "_____no_output_____" ], [ "import cv2 \nimport numpy as np \nimport os\nfrom pathlib import *\n\npath = \"data//UCFCrime2Local//UCFCrime2Local//Txt annotations\"\n\nfiles = (x for x in Path(path).iterdir() if x.is_file())\nfor file in files:\n films = list()\n name = file.name.split(\".\")[0]\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n lost = int(line.split(\" \")[6])\n if lost == 0:\n lost = 1\n else:\n lost = 0\n films.append(lost)\n films = np.array(films)\n np.save(os.path.join(\"data//UFC//training\",name + \".npy\"),films)\n \n #print(str(file.name).split(\"_\")[0], \"is a file!\")\n #films.append(str(file.name).split(\" \")[6])", "_____no_output_____" ], [ "import cv2 \nimport numpy as np \nimport os\nfrom pathlib import *\n\nfile = \"data//UCFCrime2Local//UCFCrime2Local//Txt annotations//Burglary099.txt\"\n\nfilms = list()\nname = \"Burglary099\"\nwith open(file) as f:\n lines = f.readlines()\n for line in lines:\n lost = int(line.split(\" \")[6])\n if lost == 0:\n lost = 1\n else:\n lost = 0\n films.append(lost)\n films = np.array(films)\n np.save(os.path.join(\"data//UFC//testing\",name + \".npy\"),films)", "_____no_output_____" ], [ "import numpy as np\nassult = np.load(\"data//UFC//testing//NormalVideos004.npy\")", "_____no_output_____" ], [ "sub = os.listdir(\"data//UFC//training//frames\")", "_____no_output_____" ], [ "sub = 
os.listdir(\"data//UFC//testing//frames\")", "_____no_output_____" ], [ "import numpy as np\nfor name in sub:\n if \"Normal\" in name:\n files = os.listdir(os.path.join(\"data//UFC//training//frames\",name))\n name = name.split(\"_\")[0:2]\n name = name[0] + name[1]\n tom = np.zeros((len(files),),np.int8)\n np.save(os.path.join(\"data//UFC//training\",name),tom)\n \n ", "_____no_output_____" ], [ "import tensorflow.keras as keras\n\nkeras.models.load_model(\"flow_inception_i3d_kinetics_only_tf_dim_ordering_tf_kernels_no_top.h5\")", "_____no_output_____" ], [ "import math\nimport tensorflow.keras as keras\nimport cv2\nimport os\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import config\nfrom utils import Dataloader\nfrom sklearn.metrics import roc_auc_score , roc_curve\nfrom pathlib import *\n\ngpus = config.experimental.list_physical_devices('GPU') \nconfig.experimental.set_memory_growth(gpus[0], True)\n\ntest_bilder = list()\nfor folder in os.listdir(\"data//UFC//testing//frames\"):\n path = os.path.join(\"data//UFC//testing//frames\",folder)\n #bildmappar.append(folder)\n for img in os.listdir(path):\n bild = os.path.join(path,img)\n test_bilder.append(bild)\n \n\n\ntest_etiketter = list()\npath = \"data//UFC//testing\"\ntestnings_ettiketter = (x for x in Path(path).iterdir() if x.is_file())\nfor ettiket in testnings_ettiketter:\n test_etiketter.append(np.load(ettiket))\n \n\ntest_etiketter = np.concatenate(test_etiketter,axis=0)\nbatch_size = 16\ntest_gen = Dataloader(test_bilder,test_etiketter,batch_size)\nreconstructed_model = keras.models.load_model(\"modelUFC3D_4-ep004-loss0.367-val_loss0.421.tf\")\nvalidation_steps = math.floor( len(test_bilder) / batch_size)\n", "_____no_output_____" ], [ "y_score = reconstructed_model.predict(test_gen,verbose=1)", "6241/6241 [==============================] - 1368s 219ms/step\n" ], [ "\nauc = roc_auc_score(test_etiketter,y_score=y_score)\nprint('AUC: ', auc*100, '%')", "AUC: 68.04392542758247 %\n" ], [ "with open('y_score.npy', 'wb') as f:\n np.save(f, y_score)", "_____no_output_____" ], [ "from sklearn.metrics import RocCurveDisplay\nimport matplotlib.pyplot as plt\n\nRocCurveDisplay.from_predictions(test_etiketter,y_score)\n\nplt.figure(figsize=(18, 6))\nplt.get_figlabels()\nplt.show", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05ad4eb46008af5637e9ccea9ba710879b60023
7,144
ipynb
Jupyter Notebook
TensorFlow-Examples/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
elitej13/project-neural-ersatz
9fa775ce93249829c69bd61088ea019fd7d3903a
[ "MIT" ]
null
null
null
TensorFlow-Examples/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
elitej13/project-neural-ersatz
9fa775ce93249829c69bd61088ea019fd7d3903a
[ "MIT" ]
null
null
null
TensorFlow-Examples/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
elitej13/project-neural-ersatz
9fa775ce93249829c69bd61088ea019fd7d3903a
[ "MIT" ]
null
null
null
31.751111
355
0.56173
[ [ [ "# Neural Network Example\n\nBuild a 2-hidden layers fully connected neural network (a.k.a multilayer perceptron) with TensorFlow.\n\n- Author: Aymeric Damien\n- Project: https://github.com/aymericdamien/TensorFlow-Examples/", "_____no_output_____" ], [ "## Neural Network Overview\n\n<img src=\"http://cs231n.github.io/assets/nn1/neural_net2.jpeg\" alt=\"nn\" style=\"width: 400px;\"/>\n\n## MNIST Dataset Overview\n\nThis example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).\n\n![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n\nMore info: http://yann.lecun.com/exdb/mnist/", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nimport tensorflow as tf", "Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" ], [ "# Parameters\nlearning_rate = 0.1\nnum_steps = 500\nbatch_size = 128\ndisplay_step = 100\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nnum_input = 784 # MNIST data input (img shape: 28*28)\nnum_classes = 10 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, num_input])\nY = tf.placeholder(\"float\", [None, num_classes])", "_____no_output_____" ], [ "# Store layers weight & bias\nweights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n}", "_____no_output_____" ], [ "# Create model\ndef neural_net(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer", "_____no_output_____" ], [ "# Construct model\nlogits = neural_net(X)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n# Evaluate model (with test logits, for dropout to be disabled)\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initialize the variables (i.e. 
assign their default value)\ninit = tf.global_variables_initializer()", "_____no_output_____" ], [ "# Start training\nwith tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n for step in range(1, num_steps+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for MNIST test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: mnist.test.images,\n Y: mnist.test.labels}))", "Step 1, Minibatch Loss= 13208.1406, Training Accuracy= 0.266\nStep 100, Minibatch Loss= 462.8610, Training Accuracy= 0.867\nStep 200, Minibatch Loss= 232.8298, Training Accuracy= 0.844\nStep 300, Minibatch Loss= 85.2141, Training Accuracy= 0.891\nStep 400, Minibatch Loss= 38.0552, Training Accuracy= 0.883\nStep 500, Minibatch Loss= 55.3689, Training Accuracy= 0.867\nOptimization Finished!\nTesting Accuracy: 0.8729\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d05ade57ccd01d970454b38420232f68fac0b1d6
29,318
ipynb
Jupyter Notebook
analysis/milestone2.ipynb
data301-2020-winter1/course-project-solo_311
4e7a36ca377a87852e5cba7583d0d203d190d3c2
[ "MIT" ]
null
null
null
analysis/milestone2.ipynb
data301-2020-winter1/course-project-solo_311
4e7a36ca377a87852e5cba7583d0d203d190d3c2
[ "MIT" ]
null
null
null
analysis/milestone2.ipynb
data301-2020-winter1/course-project-solo_311
4e7a36ca377a87852e5cba7583d0d203d190d3c2
[ "MIT" ]
null
null
null
33.315909
150
0.345692
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = (\n df.assign(year=pd.to_datetime(df['year'], errors = 'coerce').dt.year)\n .dropna()\n .reset_index()\n .drop(columns=['index'])\n)\ndf['year']=df['year'].astype(int)\n", "_____no_output_____" ], [ "def load_and_process(url_or_path_to_csv_file):\n # Method Chain 1 (Load data and deal with missing data)\n\n df1 = (\n pd.read_excel(url_or_path_to_csv_file)\n .dropna()\n .reset_index()\n )\n\n # Method Chain 2 (Create new columns, drop others, and do processing)\n\n df2 = (\n df1\n .assign(year=pd.to_datetime(df['year'], errors = 'coerce').dt.year)\n .dropna()\n .reset_index()\n .drop(columns=['index'])\n )\n df2['year']=df2['year'].astype(int) #For some reason this did not work when it was in the method chain\n\n # Make sure to return the latest dataframe\n\n return df2 ", "_____no_output_____" ], [ "df = load_and_process(r\"C:\\Users\\Nolan\\Desktop\\DATA301\\course-project-solo_311\\data\\raw\\Meteorite_Landings.xlsx\")", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "from scripts import project_functions\ndf = project_functions.load_and_process(r\"C:\\Users\\Nolan\\Desktop\\DATA301\\course-project-solo_311\\data\\raw\\Meteorite_Landings.xlsx\")\ndf", "_____no_output_____" ], [ "df3 = (\n df.assign(year = df['year'].astype(int))\n .query('year < 2021')\n\n)\n", "_____no_output_____" ], [ "df3['year'] = df3['year']<2021", "_____no_output_____" ], [ "df3", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\n\n\nfrom scripts import project_functions\n\ndf1 = (\n pd.read_excel(r\"C:\\Users\\Nolan\\Desktop\\DATA301\\course-project-solo_311\\data\\raw\\Meteorite_Landings.xlsx\")\n #.dropna()\n #.reset_index()\n )\n\n# Method Chain 2 (Create new columns, drop others, and do processing)\n\ndf2 = (\n df1\n .assign(year=pd.to_datetime(df1['year'], errors = 'coerce').dt.year)\n #.dropna()\n .reset_index()\n .drop(columns=['level_0'])\n\n )\ndf = (\ndf2.assign(year = df2['year'].astype(int))\n.query('year < 2021')\n.drop(columns=['GeoLocation'])\n.rename(columns={\"reclong\": \"long\", \"reclat\": \"lat\", \"recclass\": \"class\", \"mass (g)\": \"mass\"})\n)\ndf.count()", "name 45716\nid 45716\nnametype 45716\nrecclass 45716\nmass (g) 45585\nfall 45716\nyear 45425\nreclat 38401\nreclong 38401\nGeoLocation 38401\ndtype: int64\nlevel_0 37810\nindex 37810\nname 37810\nid 37810\nnametype 37810\nclass 37810\nmass 37810\nfall 37810\nyear 37810\nlat 37810\nlong 37810\ndtype: int64\n" ], [ "df.to_csv(r'C:\\Users\\Nolan\\Desktop\\DATA301\\course-project-solo_311\\data\\processed\\meteorite_landings_processed.csv', index = False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05b0886515322555f1fdd8f1496d3cf7df89985
130,264
ipynb
Jupyter Notebook
Wi20_content/U12_Object_Recognition/L2_feature_extraction.ipynb
uw-cheme599/uw-cheme599.github.io
49879d627165bfac148114ff4dafff6865251174
[ "BSD-3-Clause" ]
null
null
null
Wi20_content/U12_Object_Recognition/L2_feature_extraction.ipynb
uw-cheme599/uw-cheme599.github.io
49879d627165bfac148114ff4dafff6865251174
[ "BSD-3-Clause" ]
null
null
null
Wi20_content/U12_Object_Recognition/L2_feature_extraction.ipynb
uw-cheme599/uw-cheme599.github.io
49879d627165bfac148114ff4dafff6865251174
[ "BSD-3-Clause" ]
null
null
null
731.820225
111,912
0.956028
[ [ [ "from skimage import data\nfrom skimage.filters import gaussian\nimport numpy as np\nfrom scipy.stats import moment\nfrom skimage import feature\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "image = data.coins()\nplt.imshow(image, cmap='gray')\nplt.axis('off')", "_____no_output_____" ], [ "m_int = np.mean(image)\nm_std = np.std(image)\nmin_int = np.min(image)\nmax_int = np.max(image)\nm_2 = moment(image, moment=2, axis=None)\nm_3 = moment(image, moment=3, axis=None)\nm_4 = moment(image, moment=4, axis=None)", "_____no_output_____" ], [ "m_2 = moment(image, moment=1, axis=None)", "_____no_output_____" ], [ "np.mean(np.array([1, 2, 3, 4, 5]))", "_____no_output_____" ], [ "edges = feature.canny(gaussian(image, sigma=1))\nplt.imshow(edges, cmap='gray')", "_____no_output_____" ], [ "np.mean(edges)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d05b13702fd4ba5aac5b400582ed98b57d3bcec6
110,612
ipynb
Jupyter Notebook
t81_558_class_10_3_text_generation.ipynb
tenyi257/t81_558_deep_learning
b2fffd1b89d3d37adf4c9d82c4cbc991f871f3a3
[ "Apache-2.0" ]
5
2021-03-16T10:10:18.000Z
2021-03-16T10:10:26.000Z
t81_558_class_10_3_text_generation.ipynb
tenyi257/t81_558_deep_learning
b2fffd1b89d3d37adf4c9d82c4cbc991f871f3a3
[ "Apache-2.0" ]
null
null
null
t81_558_class_10_3_text_generation.ipynb
tenyi257/t81_558_deep_learning
b2fffd1b89d3d37adf4c9d82c4cbc991f871f3a3
[ "Apache-2.0" ]
2
2021-12-05T21:10:17.000Z
2022-02-12T08:29:21.000Z
66.076464
1,564
0.554162
[ [ [ "<a href=\"https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_10_3_text_generation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n", "_____no_output_____" ], [ "# T81-558: Applications of Deep Neural Networks\n**Module 10: Time Series in Keras**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).", "_____no_output_____" ], [ "# Module 10 Material\n\n* Part 10.1: Time Series Data Encoding for Deep Learning [[Video]](https://www.youtube.com/watch?v=dMUmHsktl04&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_1_timeseries.ipynb)\n* Part 10.2: Programming LSTM with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=wY0dyFgNCgY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_2_lstm.ipynb)\n* **Part 10.3: Text Generation with Keras and TensorFlow** [[Video]](https://www.youtube.com/watch?v=6ORnRAz3gnA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_3_text_generation.ipynb)\n* Part 10.4: Image Captioning with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=NmoW_AYWkb4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_4_captioning.ipynb)\n* Part 10.5: Temporal CNN in Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=i390g8acZwk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_5_temporal_cnn.ipynb)", "_____no_output_____" ], [ "# Google CoLab Instructions\n\nThe following code ensures that Google CoLab is running the correct version of TensorFlow.", "_____no_output_____" ] ], [ [ "try:\n %tensorflow_version 2.x\n COLAB = True\n print(\"Note: using Google CoLab\")\nexcept:\n print(\"Note: not using Google CoLab\")\n COLAB = False", "_____no_output_____" ] ], [ [ "# Part 10.3: Text Generation with LSTM\n\nRecurrent neural networks are also known for their ability to generate text. As a result, the output of the neural network can be free-form text. In this section, we will see how to train an LSTM can on a textual document, such as classic literature, and learn to output new text that appears to be of the same form as the training material. If you train your LSTM on [Shakespeare](https://en.wikipedia.org/wiki/William_Shakespeare), it will learn to crank out new prose similar to what Shakespeare had written. \n\nDon't get your hopes up. You are not going to teach your deep neural network to write the next [Pulitzer Prize for Fiction](https://en.wikipedia.org/wiki/Pulitzer_Prize_for_Fiction). The prose generated by your neural network will be nonsensical. However, it will usually be nearly grammatically and of a similar style as the source training documents. \n\nA neural network generating nonsensical text based on literature may not seem useful at first glance. However, this technology gets so much interest because it forms the foundation for many more advanced technologies. The fact that the LSTM will typically learn human grammar from the source document opens a wide range of possibilities. You can use similar technology to complete sentences when a user is entering text. Simply the ability to output free-form text becomes the foundation of many other technologies. 
In the next part, we will use this technique to create a neural network that can write captions for images to describe what is going on in the picture. \n\n### Additional Information\n\nThe following are some of the articles that I found useful in putting this section together.\n\n* [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/)\n* [Keras LSTM Generation Example](https://keras.io/examples/lstm_text_generation/)\n\n### Character-Level Text Generation\n\nThere are several different approaches to teaching a neural network to output free-form text. The most basic question is if you wish the neural network to learn at the word or character level. In many ways, learning at the character level is the more interesting of the two. The LSTM is learning to construct its own words without even being shown what a word is. We will begin with character-level text generation. In the next module, we will see how we can use nearly the same technique to operate at the word level. We will implement word-level automatic captioning in the next module.\n\nWe begin by importing the needed Python packages and defining the sequence length, named **maxlen**. Time-series neural networks always accept their input as a fixed-length array. Because you might not use all of the sequence elements, it is common to fill extra elements with zeros. You will divide the text into sequences of this length, and the neural network will train to predict what comes after this sequence.", "_____no_output_____" ] ], [ [ "from tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.utils import get_file\nimport numpy as np\nimport random\nimport sys\nimport io\nimport requests\nimport re", "_____no_output_____" ] ], [ [ "For this simple example, we will train the neural network on the classic children's book [Treasure Island](https://en.wikipedia.org/wiki/Treasure_Island). We begin by loading this text into a Python string and displaying the first 1,000 characters.", "_____no_output_____" ] ], [ [ "r = requests.get(\"https://data.heatonresearch.com/data/t81-558/text/\"\\\n \"treasure_island.txt\")\nraw_text = r.text\nprint(raw_text[0:1000])\n", "The Project Gutenberg EBook of Treasure Island, by Robert Louis Stevenson\r\n\r\nThis eBook is for the use of anyone anywhere at no cost and with\r\nalmost no restrictions whatsoever. You may copy it, give it away or\r\nre-use it under the terms of the Project Gutenberg License included\r\nwith this eBook or online at www.gutenberg.net\r\n\r\n\r\nTitle: Treasure Island\r\n\r\nAuthor: Robert Louis Stevenson\r\n\r\nIllustrator: Milo Winter\r\n\r\nRelease Date: January 12, 2009 [EBook #27780]\r\n\r\nLanguage: English\r\n\r\n\r\n*** START OF THIS PROJECT GUTENBERG EBOOK TREASURE ISLAND ***\r\n\r\n\r\n\r\n\r\nProduced by Juliet Sutherland, Stephen Blundell and the\r\nOnline Distributed Proofreading Team at http://www.pgdp.net\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n THE ILLUSTRATED CHILDREN'S LIBRARY\r\n\r\n\r\n _Treasure Island_\r\n\r\n Robert Louis Stevenson\r\n\r\n _Illustrated by_\r\n Milo Winter\r\n\r\n\r\n [Illustration]\r\n\r\n\r\n GRAMERCY BOOKS\r\n NEW YORK\r\n\r\n\r\n\r\n\r\n Foreword copyright © 1986 by Random House V\n" ] ], [ [ "We will extract all unique characters from the text and sort them. 
This technique allows us to assign a unique ID to each character. Because we sorted the characters, these IDs should remain the same. If we add new characters to the original text, then the IDs would change. We build two dictionaries. The first **char2idx** is used to convert a character into its ID. The second **idx2char** converts an ID back into its character.", "_____no_output_____" ] ], [ [ "processed_text = raw_text.lower()\nprocessed_text = re.sub(r'[^\\x00-\\x7f]',r'', processed_text) ", "_____no_output_____" ], [ "print('corpus length:', len(processed_text))\n\nchars = sorted(list(set(processed_text)))\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))", "corpus length: 397400\ntotal chars: 60\n" ] ], [ [ "We are now ready to build the actual sequences. Just like previous neural networks, there will be an $x$ and $y$. However, for the LSTM, $x$ and $y$ will both be sequences. The $x$ input will specify the sequences where $y$ are the expected output. The following code generates all possible sequences.", "_____no_output_____" ] ], [ [ "# cut the text in semi-redundant sequences of maxlen characters\nmaxlen = 40\nstep = 3\nsentences = []\nnext_chars = []\nfor i in range(0, len(processed_text) - maxlen, step):\n sentences.append(processed_text[i: i + maxlen])\n next_chars.append(processed_text[i + maxlen])\nprint('nb sequences:', len(sentences))", "nb sequences: 132454\n" ], [ "sentences", "_____no_output_____" ], [ "print('Vectorization...')\nx = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n x[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1", "Vectorization...\n" ], [ "x.shape", "_____no_output_____" ], [ "y.shape", "_____no_output_____" ] ], [ [ "The dummy variables for $y$ are shown below.", "_____no_output_____" ] ], [ [ "y[0:10]", "_____no_output_____" ] ], [ [ "Next, we create the neural network. This neural network's primary feature is the LSTM layer, which allows the sequences to be processed. ", "_____no_output_____" ] ], [ [ "# build the model: a single LSTM\nprint('Build model...')\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(Dense(len(chars), activation='softmax'))\n\noptimizer = RMSprop(lr=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)", "Build model...\n" ], [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm (LSTM) (None, 128) 96768 \n_________________________________________________________________\ndense (Dense) (None, 60) 7740 \n=================================================================\nTotal params: 104,508\nTrainable params: 104,508\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "The LSTM will produce new text character by character. We will need to sample the correct letter from the LSTM predictions each time. 
The **sample** function accepts the following two parameters:\n\n* **preds** - The output neurons.\n* **temperature** - 1.0 is the most conservative, 0.0 is the most confident (willing to make spelling and other errors).\n\nThe sample function below is essentially performing a [softmax]() on the neural network predictions. This causes each output neuron to become a probability of its particular letter. ", "_____no_output_____" ] ], [ [ "def sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)", "_____no_output_____" ] ], [ [ "Keras calls the following function at the end of each training Epoch. The code generates sample text generations that visually demonstrate the neural network better at text generation. As the neural network trains, the generations should look more realistic.", "_____no_output_____" ] ], [ [ "def on_epoch_end(epoch, _):\n # Function invoked at end of each epoch. Prints generated text.\n print(\"******************************************************\")\n print('----- Generating text after Epoch: %d' % epoch)\n\n start_index = random.randint(0, len(processed_text) - maxlen - 1)\n for temperature in [0.2, 0.5, 1.0, 1.2]:\n print('----- temperature:', temperature)\n\n generated = ''\n sentence = processed_text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(400):\n x_pred = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds, temperature)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n", "_____no_output_____" ] ], [ [ "We are now ready to train. It can take up to an hour to train this network, depending on how fast your computer is. If you have a GPU available, please make sure to use it.", "_____no_output_____" ] ], [ [ "# Ignore useless W0819 warnings generated by TensorFlow 2.0. Hopefully can remove this ignore in the future.\n# See https://github.com/tensorflow/tensorflow/issues/31308\nimport logging, os\nlogging.disable(logging.WARNING)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n# Fit the model\nprint_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\nmodel.fit(x, y,\n batch_size=128,\n epochs=60,\n callbacks=[print_callback])", "Train on 132454 samples\nEpoch 1/60\n 128/132454 [..............................] - ETA: 35:39******************************************************\n----- Generating text after Epoch: 0\n----- temperature: 0.2\n----- Generating with seed: \"im shouting.\n\nbut you may suppose i pa\"\nim shouting.\n\nbut you may suppose i pa" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d05b171bc3d96408047e80ef7ac0d205e1a32ef0
290,304
ipynb
Jupyter Notebook
Final/Figure-3/figure-3-its.ipynb
gregcaporaso/office-microbes
d36656f65efca9770116e95d261469684c980831
[ "BSD-3-Clause" ]
2
2018-08-02T03:59:38.000Z
2019-04-12T13:19:40.000Z
Final/Figure-3/figure-3-its.ipynb
gregcaporaso/office-microbes
d36656f65efca9770116e95d261469684c980831
[ "BSD-3-Clause" ]
null
null
null
Final/Figure-3/figure-3-its.ipynb
gregcaporaso/office-microbes
d36656f65efca9770116e95d261469684c980831
[ "BSD-3-Clause" ]
2
2021-02-09T13:10:12.000Z
2021-04-12T01:43:48.000Z
257.590062
145,342
0.910428
[ [ [ "%matplotlib inline\nimport pandas as pd\nfrom os.path import join\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport skbio\n# from q2d2 import get_within_between_distances, filter_dm_and_map\nfrom stats import mc_t_two_sample\nfrom skbio.stats.distance import anosim, permanova\nfrom skbio.stats.composition import ancom, multiplicative_replacement\nimport itertools", "/home/johnchase/.conda/envs/da/lib/python3.5/site-packages/matplotlib/__init__.py:872: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.\n warnings.warn(self.msg_depr % (key, alt_key))\n" ] ], [ [ "##Define a couple of helper functions", "_____no_output_____" ] ], [ [ "def get_within_between_distances(map_df, dm, col):\n filtered_dm, filtered_map = filter_dm_and_map(dm, map_df)\n groups = []\n distances = []\n map_dict = filtered_map[col].to_dict()\n for id_1, id_2 in itertools.combinations(filtered_map.index.tolist(), 2):\n row = []\n if map_dict[id_1] == map_dict[id_2]:\n groups.append('Within')\n else:\n groups.append('Between')\n distances.append(filtered_dm[(id_1, id_2)])\n groups = zip(groups, distances)\n distances_df = pd.DataFrame(data=list(groups), columns=['Groups', 'Distance'])\n\n return distances_df\n \n \ndef filter_dm_and_map(dm, map_df):\n ids_to_exclude = set(dm.ids) - set(map_df.index.values)\n ids_to_keep = set(dm.ids) - ids_to_exclude\n filtered_dm = dm.filter(ids_to_keep)\n filtered_map = map_df.loc[ids_to_keep]\n\n return filtered_dm, filtered_map", "_____no_output_____" ], [ "colors = sns.color_palette(\"YlGnBu\", 100)\nsns.palplot(colors)", "_____no_output_____" ] ], [ [ "Load mapping file and munge it\n-----------------", "_____no_output_____" ] ], [ [ "home = '/home/office-microbe-files'\nmap_fp = join(home, 'master_map_150908.txt')", "_____no_output_____" ], [ "sample_md = pd.read_csv(map_fp, sep='\\t', index_col=0, dtype=str)\nsample_md = sample_md[sample_md['16SITS'] == 'ITS']\nsample_md = sample_md[sample_md['OfficeSample'] == 'yes']", "_____no_output_____" ], [ "replicate_ids = '''F2F.2.Ce.021\nF2F.2.Ce.022\nF2F.3.Ce.021\nF2F.3.Ce.022\nF2W.2.Ca.021\nF2W.2.Ca.022\nF2W.2.Ce.021\nF2W.2.Ce.022\nF3W.2.Ce.021\nF3W.2.Ce.022\nF1F.3.Ca.021\nF1F.3.Ca.022\nF1C.3.Ca.021\nF1C.3.Ca.022\nF1W.2.Ce.021\nF1W.2.Ce.022\nF1W.3.Dr.021\nF1W.3.Dr.022\nF1C.3.Dr.021\nF1C.3.Dr.022\nF2W.3.Dr.059\nF3F.2.Ce.078'''.split('\\n')", "_____no_output_____" ], [ "reps = sample_md[sample_md['Description'].isin(replicate_ids)]\nreps = reps.drop(reps.drop_duplicates('Description').index).index\nsample_md.drop(reps, inplace=True)", "_____no_output_____" ] ], [ [ "Load alpha diversity\n----------------------", "_____no_output_____" ] ], [ [ "alpha_div_fp = '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/arare_max999/alpha_div_collated/observed_species.txt'\nalpha_div = pd.read_csv(alpha_div_fp, sep='\\t', index_col=0)\nalpha_div = alpha_div.T.drop(['sequences per sample', 'iteration'])\nalpha_cols = [e for e in alpha_div.columns if '990' in e]\nalpha_div = alpha_div[alpha_cols]\nsample_md = pd.concat([sample_md, alpha_div], axis=1, join='inner')\nsample_md['MeanAlpha'] = sample_md[alpha_cols].mean(axis=1)\nsample_md['MedianAlpha'] = sample_md[alpha_cols].median(axis=1)\nalpha_div = pd.read_csv(alpha_div_fp, sep='\\t', index_col=0)\nalpha_div = alpha_div.T.drop(['sequences per sample', 'iteration'])\nalpha_cols = [e for e in alpha_div.columns if '990' in e]\nalpha_div = alpha_div[alpha_cols]", 
"_____no_output_____" ] ], [ [ "add alpha diversity to map\n-------------", "_____no_output_____" ] ], [ [ "sample_md = pd.concat([sample_md, alpha_div], axis=1, join='inner')\nsample_md['MeanAlpha'] = sample_md[alpha_cols].mean(axis=1)", "_____no_output_____" ] ], [ [ "Filter the samples so that only corrosponding row 2, 3 samples are included\n-----------------------------------------------------------", "_____no_output_____" ] ], [ [ "sample_md['NoRow'] = sample_md['Description'].apply(lambda x: x[:3] + x[5:])\nrow_df = sample_md[sample_md.duplicated('NoRow', keep=False)].copy()", "_____no_output_____" ], [ "row_df['SampleType'] = 'All Row 2/3 Pairs (n={0})'.format(int(len(row_df)/2))\nplot_row_df = row_df[['Row', 'MeanAlpha', 'SampleType']]\n\n\nsample_md_wall = row_df[row_df['PlateLocation'] != 'floor'].copy()\nsample_md_wall['SampleType'] = 'Wall and Ceiling Pairs (n={0})'.format(int(len(sample_md_wall)/2))\nplot_sample_md_wall = sample_md_wall[['Row', 'MeanAlpha', 'SampleType']]\n\nsample_md_floor = row_df[row_df['PlateLocation'] == 'floor'].copy()\nsample_md_floor['SampleType'] = 'Floor Pairs (n={0})'.format(int(len(sample_md_floor)/2))\nplot_sample_md_floor = sample_md_floor[['Row', 'MeanAlpha', 'SampleType']]\n\nplot_df = pd.concat([plot_row_df, plot_sample_md_wall, plot_sample_md_floor])", "_____no_output_____" ], [ "with plt.rc_context(dict(sns.axes_style(\"darkgrid\"),\n **sns.plotting_context(\"notebook\", font_scale=2.5))):\n plt.figure(figsize=(20, 11))\n\n ax = sns.violinplot(x='SampleType', y='MeanAlpha', data=plot_df, hue='Row', hue_order=['3', '2'], \n palette=\"YlGnBu\")\n ax.set_xlabel('')\n handles, labels = ax.get_legend_handles_labels()\n ax.set_ylabel('OTU Counts')\n ax.set_title('OTU Counts')\n ax.legend(handles, ['Frequent', 'Infrequent'], title='Sampling Frequency')\n ax.get_legend().get_title().set_fontsize('15')\n plt.savefig('figure-3-its-A.svg', dpi=300)", "_____no_output_____" ], [ "row_2_values = list(row_df[(row_df['Row'] == '2')]['MeanAlpha'])\nrow_3_values = list(row_df[(row_df['Row'] == '3')]['MeanAlpha'])\nobs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)\nobs_t, param_p_val\nprint((obs_t, param_p_val), \"row 2 mean: {0}, row 1 mean: {1}\".format(np.mean(row_2_values),np.mean(row_3_values)))", "(4.178368737487422, 3.5310986970626956e-05) row 2 mean: 129.0457013574661, row 1 mean: 101.25458515283844\n" ], [ "row_2_values = list(sample_md_wall[(sample_md_wall['Row'] == '2')]['MeanAlpha'])\nrow_3_values = list(sample_md_wall[(sample_md_wall['Row'] == '3')]['MeanAlpha'])\nobs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)\nprint((obs_t, param_p_val), \"row 2 mean: {0}, row 1 mean: {1}\".format(np.mean(row_2_values),np.mean(row_3_values)))", "(-0.07797815663743306, 0.93791761535347895) row 2 mean: 77.58396226415094, row 1 mean: 78.07857142857142\n" ], [ "row_2_values = list(sample_md_floor[(sample_md_floor['Row'] == '2')]['MeanAlpha'])\nrow_3_values = list(sample_md_floor[(sample_md_floor['Row'] == '3')]['MeanAlpha'])\nobs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)\nprint((obs_t, param_p_val), \"row 2 mean: {0}, row 1 mean: {1}\".format(np.mean(row_2_values),np.mean(row_3_values)))", "(5.8449136803810715, 1.7233641180780523e-08) row 2 mean: 176.48000000000002, row 1 mean: 123.44017094017092\n" ] ], [ [ "#Beta Diversity!\n", "_____no_output_____" ], [ "Create beta diversity boxplots of within and bewteen distances for 
row. It may not make a lot of sense doing this for all samples as the location and or city affect may drown out the row affect", "_____no_output_____" ], [ "Load the distance matrix\n----------------------", "_____no_output_____" ] ], [ [ "dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/bdiv_even999/binary_jaccard_dm.txt'))", "_____no_output_____" ] ], [ [ "Run permanova and recored within between values on various categories\n----------------------\n\nAll of these will be based on the row 2, 3 paired samples, though they may be filtered to avoind confounding variables", "_____no_output_____" ], [ "###Row distances", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\n\nrow_dists = get_within_between_distances(filt_map, filt_dm, 'Row')\nrow_dists['Category'] = 'Row (n=198)'\n\npermanova(filt_dm, filt_map, column='Row', permutations=999)", "_____no_output_____" ] ], [ [ "###Plate location\n\nWe can use the same samples for this as the previous test", "_____no_output_____" ] ], [ [ "plate_dists = get_within_between_distances(filt_map, filt_dm, 'PlateLocation')\nplate_dists['Category'] = 'Plate Location (n=198)'\n\npermanova(filt_dm, filt_map, column='PlateLocation', permutations=999)", "_____no_output_____" ] ], [ [ "###Run", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\nrun_dists = get_within_between_distances(filt_map, filt_dm, 'Run')\nrun_dists['Category'] = 'Run (n=357)'\npermanova(filt_dm, filt_map, column='Run', permutations=999)", "_____no_output_____" ] ], [ [ "###Material", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\n\nmaterial_dists = get_within_between_distances(filt_map, filt_dm, 'Material')\nmaterial_dists['Category'] = 'Material (n=198)'\n\npermanova(filt_dm, filt_map, column='Material', permutations=999)", "_____no_output_____" ], [ "all_dists = material_dists.append(row_dists).append(plate_dists).append(run_dists)", "_____no_output_____" ], [ "with plt.rc_context(dict(sns.axes_style(\"darkgrid\"),\n **sns.plotting_context(\"notebook\", font_scale=1.8))):\n plt.figure(figsize=(20,11))\n ax = sns.boxplot(x=\"Category\", y=\"Distance\", hue=\"Groups\", hue_order=['Within', 'Between'], data=all_dists, palette=sns.color_palette(['#f1fabb', '#2259a6']))\n ax.set_ylim([0.9, 1.02])\n ax.set_xlabel('')\n ax.set_title('Binary-Jaccard')\n plt.legend(loc='upper right')\n \n plt.savefig('figure-3-its-B.svg', dpi=300)\n ", "_____no_output_____" ], [ "dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/bdiv_even999/bray_curtis_dm.txt'))\n", "_____no_output_____" ] ], [ [ "##Row Distances", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\n\nrow_dists = get_within_between_distances(filt_map, filt_dm, 'Row')\nrow_dists['Category'] = 'Row (n=198)'\n\npermanova(filt_dm, filt_map, column='Row', permutations=999)", "_____no_output_____" ] ], [ [ "##Plate Location", "_____no_output_____" ] ], [ [ "plate_dists = get_within_between_distances(filt_map, filt_dm, 'PlateLocation')\nplate_dists['Category'] = 
'Plate Location (n=198)'\n\npermanova(filt_dm, filt_map, column='PlateLocation', permutations=999)", "_____no_output_____" ] ], [ [ "##Run", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\nrun_dists = get_within_between_distances(filt_map, filt_dm, 'Run')\nrun_dists['Category'] = 'Run (n=357)'\npermanova(filt_dm, filt_map, column='Run', permutations=999)", "_____no_output_____" ] ], [ [ "##Material", "_____no_output_____" ] ], [ [ "filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]\nfilt_dm, filt_map = filter_dm_and_map(dm, filt_map)\n\nmaterial_dists = get_within_between_distances(filt_map, filt_dm, 'Material')\nmaterial_dists['Category'] = 'Material (n=198)'\n\npermanova(filt_dm, filt_map, column='Material', permutations=999)", "_____no_output_____" ], [ "all_dists = material_dists.append(row_dists).append(plate_dists).append(run_dists)", "_____no_output_____" ], [ "with plt.rc_context(dict(sns.axes_style(\"darkgrid\"),\n **sns.plotting_context(\"notebook\", font_scale=1.8))):\n plt.figure(figsize=(20,11))\n ax = sns.boxplot(x=\"Category\", y=\"Distance\", hue=\"Groups\", hue_order=['Within', 'Between'], data=all_dists, palette=sns.color_palette(['#f1fabb', '#2259a6']))\n ax.set_ylim([0.9, 1.02])\n ax.set_xlabel('')\n ax.set_title('Bray-Curtis')\n plt.legend(loc='upper right')\n \n plt.savefig('figure-3-its-C.svg', dpi=300)", "_____no_output_____" ] ], [ [ "ANCOM\n-----", "_____no_output_____" ] ], [ [ "table_fp = join(home, 'core_div_out/table_even1000.txt')\ntable = pd.read_csv(table_fp, sep='\\t', skiprows=1, index_col=0).T\ntable.index = table.index.astype(str)", "/home/johnchase/.conda/envs/da/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2723: DtypeWarning: Columns (0) have mixed types. 
Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ], [ "table_ancom = table.loc[:, table[:3].sum(axis=0) > 0]", "_____no_output_____" ], [ "table_ancom = pd.DataFrame(multiplicative_replacement(table_ancom), index=table_ancom.index, columns=table_ancom.columns)\ntable_ancom.dropna(axis=0, inplace=True)\nintersect_ids = set(row_md.index).intersection(set(table_ancom.index))\nrow_md_ancom = row_md.loc[intersect_ids, ]\ntable_ancom = table_ancom.loc[intersect_ids, ]", "_____no_output_____" ], [ "%time\nresults = ancom(table_ancom, row_md_ancom['Row'])", "_____no_output_____" ], [ "sigs = results[results['reject'] == True]", "_____no_output_____" ], [ "tax_fp = '/home/office-microbe-files/pick_otus_out_97/uclust_assigned_taxonomy/rep_set_tax_assignments.txt'\ntaxa_map = pd.read_csv(tax_fp, sep='\\t', index_col=0, names=['Taxa', 'none', 'none'])\ntaxa_map.drop('none', axis=1, inplace=True)\ntaxa_map.index = taxa_map.index.astype(str)", "_____no_output_____" ], [ "taxa_map.loc[sigs.sort_values('W').index.astype(str)]", "_____no_output_____" ], [ "pd.options.display.max_colwidth = 200", "_____no_output_____" ], [ "sigs", "_____no_output_____" ], [ "np.mean(w_dm.data)", "_____no_output_____" ], [ "np.median(w_dm.data)", "_____no_output_____" ], [ "w_dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_closed/bdiv_even1000/bray_curtis_dm.txt'))\n", "_____no_output_____" ], [ "np.mean(w_dm.data)", "_____no_output_____" ], [ "np.median(w_dm.data)", "_____no_output_____" ], [ "4980239/22783729", "_____no_output_____" ], [ "foo", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05b1cda54b41ffe7f3cde686827f0cc4e24f1b8
18,690
ipynb
Jupyter Notebook
sagemaker-python-sdk/chainer_cifar10/chainer_single_machine_cifar10.ipynb
can-sun/amazon-sagemaker-examples
6908559125336128ef4533d657053828e85a68c6
[ "Apache-2.0" ]
null
null
null
sagemaker-python-sdk/chainer_cifar10/chainer_single_machine_cifar10.ipynb
can-sun/amazon-sagemaker-examples
6908559125336128ef4533d657053828e85a68c6
[ "Apache-2.0" ]
null
null
null
sagemaker-python-sdk/chainer_cifar10/chainer_single_machine_cifar10.ipynb
can-sun/amazon-sagemaker-examples
6908559125336128ef4533d657053828e85a68c6
[ "Apache-2.0" ]
null
null
null
40.542299
507
0.635634
[ [ [ "## Training with Chainer\n\n[VGG](https://arxiv.org/pdf/1409.1556v6.pdf) is an architecture for deep convolution networks. In this example, we train a convolutional network to perform image classification using the CIFAR-10 dataset. CIFAR-10 consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. We'll train a model on SageMaker, deploy it to Amazon SageMaker hosting, and then classify images using the deployed model.\n\nThe Chainer script runs inside of a Docker container running on SageMaker. For more information about the Chainer container, see the sagemaker-chainer-containers repository and the sagemaker-python-sdk repository:\n\n* https://github.com/aws/sagemaker-chainer-containers\n* https://github.com/aws/sagemaker-python-sdk\n\nFor more on Chainer, please visit the Chainer repository:\n\n* https://github.com/chainer/chainer\n\nThis notebook is adapted from the [CIFAR-10](https://github.com/chainer/chainer/tree/master/examples/cifar) example in the Chainer repository.", "_____no_output_____" ] ], [ [ "# Setup\nfrom sagemaker import get_execution_role\nimport sagemaker\n\nsagemaker_session = sagemaker.Session()\n\n# This role retrieves the SageMaker-compatible role used by this Notebook Instance.\nrole = get_execution_role()", "_____no_output_____" ] ], [ [ "## Downloading training and test data\n\nWe use helper functions provided by `chainer` to download and preprocess the CIFAR10 data. ", "_____no_output_____" ] ], [ [ "import chainer\n\nfrom chainer.datasets import get_cifar10\n\ntrain, test = get_cifar10()", "_____no_output_____" ] ], [ [ "## Uploading the data\n\nWe save the preprocessed data to the local filesystem, and then use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value `inputs` identifies the S3 location, which we will use when we start the Training Job.", "_____no_output_____" ] ], [ [ "import os\nimport shutil\n\nimport numpy as np\n\ntrain_data = [element[0] for element in train]\ntrain_labels = [element[1] for element in train]\n\ntest_data = [element[0] for element in test]\ntest_labels = [element[1] for element in test]\n\n\ntry:\n os.makedirs(\"/tmp/data/train_cifar\")\n os.makedirs(\"/tmp/data/test_cifar\")\n np.savez(\"/tmp/data/train_cifar/train.npz\", data=train_data, labels=train_labels)\n np.savez(\"/tmp/data/test_cifar/test.npz\", data=test_data, labels=test_labels)\n train_input = sagemaker_session.upload_data(\n path=os.path.join(\"/tmp\", \"data\", \"train_cifar\"), key_prefix=\"notebook/chainer_cifar/train\"\n )\n test_input = sagemaker_session.upload_data(\n path=os.path.join(\"/tmp\", \"data\", \"test_cifar\"), key_prefix=\"notebook/chainer_cifar/test\"\n )\nfinally:\n shutil.rmtree(\"/tmp/data\")\nprint(\"training data at %s\", train_input)\nprint(\"test data at %s\", test_input)", "_____no_output_____" ] ], [ [ "## Writing the Chainer script to run on Amazon SageMaker\n\n### Training\n\nWe need to provide a training script that can run on the SageMaker platform. 
The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:\n\n* `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to.\n These artifacts are uploaded to S3 for model hosting.\n* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.\n* `SM_OUTPUT_DIR`: A string representing the filesystem path to write output artifacts to. Output artifacts may\n include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed\n and uploaded to S3 to the same S3 prefix as the model artifacts.\n\nSupposing two input channels, 'train' and 'test', were used in the call to the Chainer estimator's ``fit()`` method,\nthe following will be set, following the format `SM_CHANNEL_[channel_name]`:\n\n* `SM_CHANNEL_TRAIN`: A string representing the path to the directory containing data in the 'train' channel\n* `SM_CHANNEL_TEST`: Same as above, but for the 'test' channel.\n\nA typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an `argparse.ArgumentParser` instance. For example, the script run by this notebook starts with the following:\n\n```python\nimport argparse\nimport os\n\nif __name__ =='__main__':\n\n parser = argparse.ArgumentParser()\n\n # retrieve the hyperparameters we set from the client (with some defaults)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--learning-rate', type=float, default=0.05)\n\n # Data, model, and output directories These are required.\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n parser.add_argument('--test', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n \n args, _ = parser.parse_known_args()\n \n num_gpus = int(os.environ['SM_NUM_GPUS'])\n \n # ... load from args.train and args.test, train a model, write model to args.model_dir.\n```\n\nBecause the Chainer container imports your training script, you should always put your training code in a main guard (`if __name__=='__main__':`) so that the container does not inadvertently run your training code at the wrong point in execution.\n\nFor more information about training environment variables, please visit https://github.com/aws/sagemaker-containers.\n\n### Hosting and Inference\n\nWe use a single script to train and host the Chainer model. You can also write separate scripts for training and hosting. 
In contrast with the training script, the hosting script requires you to implement functions with particular function signatures (or rely on defaults for those functions).\n\nThese functions load your model, deserialize data sent by a client, obtain inferences from your hosted model, and serialize predictions back to a client:\n\n\n* **`model_fn(model_dir)` (always required for hosting)**: This function is invoked to load model artifacts from those that were written into `model_dir` during training.\n\nThe script that this notebook runs uses the following `model_fn` function for hosting:\n```python\ndef model_fn(model_dir):\n chainer.config.train = False\n model = L.Classifier(net.VGG(10))\n serializers.load_npz(os.path.join(model_dir, 'model.npz'), model)\n return model.predictor\n```\n* `input_fn(input_data, content_type)`: This function is invoked to deserialize prediction data when a prediction request is made. The return value is passed to predict_fn. `input_data` is the serialized input data in the body of the prediction request, and `content_type`, the MIME type of the data.\n \n \n* `predict_fn(input_data, model)`: This function accepts the return value of `input_fn` as the `input_data` parameter and the return value of `model_fn` as the `model` parameter and returns inferences obtained from the model.\n \n \n* `output_fn(prediction, accept)`: This function is invoked to serialize the return value from `predict_fn`, which is passed in as the `prediction` parameter, back to the SageMaker client in response to prediction requests.\n\n\n`model_fn` is always required, but default implementations exist for the remaining functions. These default implementations can deserialize a NumPy array, invoking the model's `__call__` method on the input data, and serialize a NumPy array back to the client.\n\nThis notebook relies on the default `input_fn`, `predict_fn`, and `output_fn` implementations. See the Chainer sentiment analysis notebook for an example of how one can implement these hosting functions.\n\nPlease examine the script below. Training occurs behind the main guard, which prevents the function from being run when the script is imported, and `model_fn` loads the model saved into `model_dir` during training.\n\n\n\nFor more on writing Chainer scripts to run on SageMaker, or for more on the Chainer container itself, please see the following repositories: \n\n* For writing Chainer scripts to run on SageMaker: https://github.com/aws/sagemaker-python-sdk\n* For more on the Chainer container and default hosting functions: https://github.com/aws/sagemaker-chainer-containers\n\n", "_____no_output_____" ] ], [ [ "!pygmentize 'src/chainer_cifar_vgg_single_machine.py'", "_____no_output_____" ] ], [ [ "## Running the training script on SageMaker\n\nTo train a model with a Chainer script, we construct a ```Chainer``` estimator using the [sagemaker-python-sdk](https://github.com/aws/sagemaker-python-sdk). We pass in an `entry_point`, the name of a script that contains a couple of functions with certain signatures (`train` and `model_fn`), and a `source_dir`, a directory containing all code to run inside the Chainer container. This script will be run on SageMaker in a container that invokes these functions to train and load Chainer models. \n\nThe ```Chainer``` class allows us to run our training function as a training job on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. 
In this case we will run our training job on one `ml.p2.xlarge` instance.", "_____no_output_____" ] ], [ [ "from sagemaker.chainer.estimator import Chainer\n\nchainer_estimator = Chainer(\n    entry_point=\"chainer_cifar_vgg_single_machine.py\",\n    source_dir=\"src\",\n    role=role,\n    sagemaker_session=sagemaker_session,\n    train_instance_count=1,\n    train_instance_type=\"ml.p2.xlarge\",\n    hyperparameters={\"epochs\": 50, \"batch-size\": 64},\n)\n\nchainer_estimator.fit({\"train\": train_input, \"test\": test_input})", "_____no_output_____" ] ], [ [ "Our Chainer script writes various artifacts, such as plots, to a directory `output_data_dir`, the contents of which SageMaker uploads to S3. Now we download and extract these artifacts.", "_____no_output_____" ] ], [ [ "from s3_util import retrieve_output_from_s3\n\nchainer_training_job = chainer_estimator.latest_training_job.name\n\ndesc = sagemaker_session.sagemaker_client.describe_training_job(\n    TrainingJobName=chainer_training_job\n)\noutput_data = desc[\"ModelArtifacts\"][\"S3ModelArtifacts\"].replace(\"model.tar.gz\", \"output.tar.gz\")\n\nretrieve_output_from_s3(output_data, \"output/single_machine_cifar\")", "_____no_output_____" ] ], [ [ "These plots show the accuracy and loss over epochs:", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nfrom IPython.display import display\n\naccuracy_graph = Image(filename=\"output/single_machine_cifar/accuracy.png\", width=800, height=800)\nloss_graph = Image(filename=\"output/single_machine_cifar/loss.png\", width=800, height=800)\n\ndisplay(accuracy_graph, loss_graph)", "_____no_output_____" ] ], [ [ "## Deploying the Trained Model\n\nAfter training, we use the Chainer estimator object to create and deploy a hosted prediction endpoint. We can use a CPU-based instance for inference (in this case an `ml.m4.xlarge`), even though we trained on GPU instances.\n\nThe predictor object returned by `deploy` lets us call the new endpoint and perform inference on our sample images. 
", "_____no_output_____" ] ], [ [ "predictor = chainer_estimator.deploy(initial_instance_count=1, instance_type=\"ml.m4.xlarge\")", "_____no_output_____" ] ], [ [ "### CIFAR10 sample images\n\nWe'll use these CIFAR10 sample images to test the service:\n\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/airplane1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/automobile1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/bird1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/cat1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/deer1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/dog1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/frog1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/horse1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/ship1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/truck1.png\" />\n\n", "_____no_output_____" ], [ "## Predicting using SageMaker Endpoint\n\nWe batch the images together into a single NumPy array to obtain multiple inferences with a single prediction request.", "_____no_output_____" ] ], [ [ "from skimage import io\nimport numpy as np\n\n\ndef read_image(filename):\n img = io.imread(filename)\n img = np.array(img).transpose(2, 0, 1)\n img = np.expand_dims(img, axis=0)\n img = img.astype(np.float32)\n img *= 1.0 / 255.0\n img = img.reshape(3, 32, 32)\n return img\n\n\ndef read_images(filenames):\n return np.array([read_image(f) for f in filenames])\n\n\nfilenames = [\n \"images/airplane1.png\",\n \"images/automobile1.png\",\n \"images/bird1.png\",\n \"images/cat1.png\",\n \"images/deer1.png\",\n \"images/dog1.png\",\n \"images/frog1.png\",\n \"images/horse1.png\",\n \"images/ship1.png\",\n \"images/truck1.png\",\n]\n\nimage_data = read_images(filenames)", "_____no_output_____" ] ], [ [ "The predictor runs inference on our input data and returns a list of predictions whose argmax gives the predicted label of the input data. ", "_____no_output_____" ] ], [ [ "response = predictor.predict(image_data)\n\nfor i, prediction in enumerate(response):\n print(\"image {}: prediction: {}\".format(i, prediction.argmax(axis=0)))", "_____no_output_____" ] ], [ [ "## Cleanup\n\nAfter you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it.", "_____no_output_____" ] ], [ [ "chainer_estimator.delete_endpoint()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
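The notebook record above retrieves hyperparameters with `argparse` and reads its data and model locations from the `SM_*` environment variables. The sketch below is a minimal, self-contained illustration of that pattern only; it is not the `chainer_cifar_vgg_single_machine.py` script itself, and the fallback paths passed to `os.environ.get` are invented defaults so the snippet also runs outside a SageMaker container.

```python
# Minimal sketch of the hyperparameter / environment-variable pattern described above.
# The SM_* variable names come from the notebook text; the fallback paths are assumed
# defaults so the script can also be exercised outside a SageMaker container.
import argparse
import os

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Hyperparameters arrive as command-line arguments set by the estimator.
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--learning-rate', type=float, default=0.05)

    # Data and model locations come from the training environment.
    parser.add_argument('--model-dir', type=str,
                        default=os.environ.get('SM_MODEL_DIR', '/tmp/model'))
    parser.add_argument('--train', type=str,
                        default=os.environ.get('SM_CHANNEL_TRAIN', '/tmp/data/train'))
    parser.add_argument('--test', type=str,
                        default=os.environ.get('SM_CHANNEL_TEST', '/tmp/data/test'))

    args, _ = parser.parse_known_args()

    print('train channel:', args.train)
    print('test channel:', args.test)
    print('model artifacts will be written to:', args.model_dir)
```

Keeping the body under a main guard mirrors the note in the notebook: the container imports the training script, so nothing should execute at import time.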
d05b24985b1b836dab30ec35a82e2d824f95b5b7
270,724
ipynb
Jupyter Notebook
meetup.ipynb
sebastiandres/talk_2016_04_python_meetup_sklearn
39a97af6d5053c78ff14713d0bdfda38586b2a65
[ "MIT" ]
null
null
null
meetup.ipynb
sebastiandres/talk_2016_04_python_meetup_sklearn
39a97af6d5053c78ff14713d0bdfda38586b2a65
[ "MIT" ]
null
null
null
meetup.ipynb
sebastiandres/talk_2016_04_python_meetup_sklearn
39a97af6d5053c78ff14713d0bdfda38586b2a65
[ "MIT" ]
null
null
null
115.941756
52,992
0.842024
[ [ [ "<img src=\"images/utfsm.png\" alt=\"\" width=\"200px\" align=\"right\"/>\n\n# USM Numérica\n## Tema del Notebook\n### Objetivos\n1. Conocer el funcionamiento de la librerìa sklearn de Machine Learning\n2. Aplicar la librerìa sklearn para solucionar problemas de Machine Learning", "_____no_output_____" ], [ "## Sobre el autor\n### Sebastián Flores\n#### ICM UTFSM\n#### [email protected]", "_____no_output_____" ], [ "## Sobre la presentación\n#### Contenido creada en ipython notebook (jupyter)\n#### Versión en Slides gracias a RISE de Damián Avila\nSoftware:\n* python 2.7 o python 3.1\n* pandas 0.16.1\n* sklearn 0.16.1\n\nOpcional:\n* numpy 1.9.2\n* matplotlib 1.3.1", "_____no_output_____" ] ], [ [ "from sklearn import __version__ as vsn\nprint(vsn)", "0.24.1\n" ] ], [ [ "## 0.1 Instrucciones\nLas instrucciones de instalación y uso de un ipython notebook se encuentran en el siguiente [link](link).\n\nDespués de descargar y abrir el presente notebook, recuerden:\n* Desarrollar los problemas de manera secuencial.\n* Guardar constantemente con *`Ctr-S`* para evitar sorpresas.\n* Reemplazar en las celdas de código donde diga *`FIX_ME`* por el código correspondiente.\n* Ejecutar cada celda de código utilizando *`Ctr-Enter`*\n\n## 0.2 Licenciamiento y Configuración\nEjecutar la siguiente celda mediante *`Ctr-Enter`*.", "_____no_output_____" ] ], [ [ "\"\"\"\nIPython Notebook v4.0 para python 3.0\nLibrerías adicionales: numpy, scipy, matplotlib. (EDITAR EN FUNCION DEL NOTEBOOK!!!)\nContenido bajo licencia CC-BY 4.0. Código bajo licencia MIT. \n(c) Sebastian Flores, Christopher Cooper, Alberto Rubio, Pablo Bunout.\n\"\"\"\n# Configuración para recargar módulos y librerías dinámicamente\n%reload_ext autoreload\n%autoreload 2\n\n# Configuración para graficos en línea\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 1.- Sobre la librería sklearn\n#### Historia\n- Nace en 2007, como un Google Summer Project de David Cournapeau. \n- Retomado por Matthieu Brucher para su proyecto de tesis.\n- Desde 2010 con soporte por parte de INRIA.\n- Actualmente +35 colaboradores.", "_____no_output_____" ], [ "## 1.- Sobre la librería sklearn\n#### Instalación\nEn python, con un poco de suerte:\n```\npip install -U scikit-learn\n```\n\nUtilizando Anaconda:\n\n```\nconda install scikit-learn\n```", "_____no_output_____" ], [ "## 1.- Sobre la librería sklearn\n#### ¿Porqué sklearn?\nsklearn viene de scientific toolbox for Machine Learning. \n\nscikit learn para los amigos.\n\nExisten múltiples scikits, que son \"scientific toolboxes\" construidos sobre SciPy: [https://scikits.appspot.com/scikits](https://scikits.appspot.com/scikits).", "_____no_output_____" ], [ "Primero que nada... ¿Qué es Machine Learning?", "_____no_output_____" ], [ "## 2.- Machine Learning 101\n#### Ejemplo\n\nConsideremos un dataset consistente en características de diversos animales.\n\n```\npatas, ancho, largo, alto, peso, especie\n[numero],[metros],[metros],[metros],[kilogramos],[]\n2, 0.6, 0.4, 1.7, 75, humano\n2, 0.6, 0.4, 1.8, 90, humano\n...\n2, 0.5, 0.5, 1.7, 85, humano\n4, 0.2, 0.5, 0,3, 30, gato\n...\n4, 0.25, 0.55, 0.32, 32, gato\n4, 0.5, 0.8, 0.3, 50, perro\n...\n4, 0.4, 0.4, 0.32, 40, perro\n```", "_____no_output_____" ], [ "## 2.- Machine Learning 101\n### Clustering\n\nSupongamos que no nos han dicho la especie de cada animal. \n\n¿Podríamos reconocer las distintas especies? 
\n\n¿Podríamos reconocer que existen 3 grupos distintos de animales?", "_____no_output_____" ], [ "## 2.- Machine Learning 101\n### Clasificación\n\nSupongamos que conocemos los datos de cada animal y además la especie.\n\nSi alguien llega con las medidas de un animal... ¿podemos decir cuál será la especie?", "_____no_output_____" ], [ "## 2.- Machine Learning 101\n### Regresión\n\nSupongamos que conocemos los datos de cada animal y su especie. \n\nSi alguien llega con los datos de un animal, excepto el peso... ¿podemos predecir el peso que tendrá el animal?", "_____no_output_____" ], [ "## 2.- Machine Learning 101\n### Definiciones\n\n* Los datos utilizados para predecir son predictores (features), y típicamente se llama `X`.\n* El dato que se busca predecir se llama etiqueta (label) y puede ser numérica o categórica, y típicamente se llama `y`.", "_____no_output_____" ], [ "## 3- Generalidades de sklearn\n### Imagen resumen\n\n<img src=\"images/ml_map.png\" alt=\"\" width=\"1400px\" align=\"middle\"/>", "_____no_output_____" ], [ "## 3- Generalidades de sklearn\n### Procedimiento General", "_____no_output_____" ] ], [ [ "from sklearn import HelpfulMethods\nfrom sklearn import AlgorithmIWantToUse\n\n# split data into train and test datasets\n\n# train model with train dataset\n\n# compute error on test dataset\n\n# Optional: Train model with all available data\n\n# Use model for some prediction\n", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\n#### Wine Dataset\n\nLos datos del [Wine Dataset](https://archive.ics.uci.edu/ml/datasets/Wine) son un conjunto de datos clásicos para verificar los algoritmos de clustering. \n\n<img src=\"images/wine.jpg\" alt=\"\" width=\"600px\" align=\"middle\"/>\n\nLos datos corresponden a 3 cultivos diferentes de vinos de la misma región de Italia, y que han sido identificados con las etiquetas 1, 2 y 3.", "_____no_output_____" ], [ "## 4- Clustering con sklearn\n#### Wine Dataset\n\nPara cada tipo de vino se realizado 13 análisis químicos:\n\n1. Alcohol \n2. Malic acid \n3. Ash \n4. Alcalinity of ash \n5. Magnesium \n6. Total phenols \n7. Flavanoids \n8. Nonflavanoid phenols \n9. Proanthocyanins \n10. Color intensity \n11. Hue \n12. OD280/OD315 of diluted wines \n13. 
Proline \n\n\nLa base de datos contiene 178 muestras distintas en total.", "_____no_output_____" ] ], [ [ "%%bash\nhead data/wine_data.csv", "class,alcohol,malic_acid,ash,alcalinity_of_ash,magnesium,total_phenols,flavanoids,nonflavanoid_phenols,proanthocyanins,color_intensity,hue,OD280-OD315_of_diluted_wines,proline \n1,14.23,1.71,2.43,15.6,127,2.8,3.06,.28,2.29,5.64,1.04,3.92,1065\n1,13.2,1.78,2.14,11.2,100,2.65,2.76,.26,1.28,4.38,1.05,3.4,1050\n1,13.16,2.36,2.67,18.6,101,2.8,3.24,.3,2.81,5.68,1.03,3.17,1185\n1,14.37,1.95,2.5,16.8,113,3.85,3.49,.24,2.18,7.8,.86,3.45,1480\n1,13.24,2.59,2.87,21,118,2.8,2.69,.39,1.82,4.32,1.04,2.93,735\n1,14.2,1.76,2.45,15.2,112,3.27,3.39,.34,1.97,6.75,1.05,2.85,1450\n1,14.39,1.87,2.45,14.6,96,2.5,2.52,.3,1.98,5.25,1.02,3.58,1290\n1,14.06,2.15,2.61,17.6,121,2.6,2.51,.31,1.25,5.05,1.06,3.58,1295\n1,14.83,1.64,2.17,14,97,2.8,2.98,.29,1.98,5.2,1.08,2.85,1045\n" ] ], [ [ "## 4- Clustering con sklearn\n#### Lectura de datos", "_____no_output_____" ] ], [ [ "import pandas as pd\ndata = pd.read_csv(\"data/wine_data.csv\")", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\n#### Exploración de datos", "_____no_output_____" ] ], [ [ "data.columns", "_____no_output_____" ], [ "data[\"class\"].value_counts()", "_____no_output_____" ], [ "data.describe(include=\"all\")", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\n#### Exploración gráfica de datos", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\ndata.hist(figsize=(12,20))\nplt.show()", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\n#pd.scatter_matrix(data, figsize=(12,12), range_padding=0.2)\n#plt.show()", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\n#### Separación de los datos\nNecesitamos separar los datos en los predictores (features) y las etiquetas (labels)", "_____no_output_____" ] ], [ [ "X = data.drop(\"class\", axis=1)\ntrue_labels = data[\"class\"] -1 # labels deben ser 0, 1, 2, ..., n-1", "_____no_output_____" ] ], [ [ "## 4- Custering\n#### Magnitudes de los datos", "_____no_output_____" ] ], [ [ "print(X.mean())", "alcohol 13.000618\nmalic_acid 2.336348\nash 2.366517\nalcalinity_of_ash 19.494944\nmagnesium 99.741573\ntotal_phenols 2.295112\nflavanoids 2.029270\nnonflavanoid_phenols 0.361854\nproanthocyanins 1.590899\ncolor_intensity 5.058090\nhue 0.957449\nOD280-OD315_of_diluted_wines 2.611685\nproline 746.893258\ndtype: float64\n" ], [ "print(X.std())", "alcohol 0.811827\nmalic_acid 1.117146\nash 0.274344\nalcalinity_of_ash 3.339564\nmagnesium 14.282484\ntotal_phenols 0.625851\nflavanoids 0.998859\nnonflavanoid_phenols 0.124453\nproanthocyanins 0.572359\ncolor_intensity 2.318286\nhue 0.228572\nOD280-OD315_of_diluted_wines 0.709990\nproline 314.907474\ndtype: float64\n" ] ], [ [ "## 4- Clustering con sklearn\n#### Algoritmo de Clustering\nPara Clustering usaremos el algoritmo KMeans. 
\n\nApliquemos un algoritmo de clustering directamente", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\n\n# Parameters\nn_clusters = 3\n\n# Running the algorithm\nkmeans = KMeans(n_clusters)\nkmeans.fit(X)\npred_labels = kmeans.labels_\n\ncm = confusion_matrix(true_labels, pred_labels)\nprint(cm)", "[[ 0 46 13]\n [50 1 20]\n [19 0 29]]\n" ] ], [ [ "## 4- Clustering con sklearn\n#### Normalizacion de datos\nResulta conveniente escalar los datos, para que el algoritmo de clustering funcione mejor", "_____no_output_____" ] ], [ [ "from sklearn import preprocessing\nX_scaled = preprocessing.scale(X)", "_____no_output_____" ], [ "print(X_scaled.mean())", "-1.2282501914608474e-16\n" ], [ "print(X_scaled.std())", "1.0\n" ] ], [ [ "## 4- Clustering con sklearn\n#### Algoritmo de Clustering\nAhora podemos aplicar un algoritmo de clustering", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\n\n# Parameters\nn_clusters = 3\n\n# Running the algorithm\nkmeans = KMeans(n_clusters)\nkmeans.fit(X_scaled)\npred_labels = kmeans.labels_\n\ncm = confusion_matrix(true_labels, pred_labels)\nprint(cm)", "[[ 0 59 0]\n [ 3 3 65]\n [48 0 0]]\n" ] ], [ [ "## 4- Clustering con sklearn\n#### Regla del codo\nEn todos los casos hemos utilizado que el número de clusters es igual a 3. En caso que no conociéramos este dato, deberíamos graficar la suma de las distancias a los clusters para cada punto, en función del número de clusters.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\n\nclusters = range(2,20)\ntotal_distance = []\nfor n_clusters in clusters:\n kmeans = KMeans(n_clusters)\n kmeans.fit(X_scaled)\n pred_labels = kmeans.labels_\n centroids = kmeans.cluster_centers_\n # Get the distances\n distance_for_n = 0\n for k in range(n_clusters):\n points = X_scaled[pred_labels==k]\n aux = (points - centroids[k,:])**2\n distance_for_n += (aux.sum(axis=1)**0.5).sum()\n total_distance.append(distance_for_n)", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\nGraficando lo anterior, obtenemos", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nfig = plt.figure(figsize=(16,8))\nplt.plot(clusters, total_distance, 'rs')\nplt.xlim(min(clusters)-1, max(clusters)+1)\nplt.ylim(0, max(total_distance)*1.1)\nplt.show()", "_____no_output_____" ] ], [ [ "## 4- Clustering con sklearn\n¿Qué tan dificil es usar otro algoritmo de clustering?", "_____no_output_____" ], [ "Nada dificil.", "_____no_output_____" ], [ "Algoritmos disponibles:\n* K-Means\n* Mini-batch K-means\n* Affinity propagation\n* Mean-shift\n* Spectral clustering\n* Ward hierarchical clustering\n* Agglomerative clustering\n* DBSCAN\n* Gaussian mixtures\n* Birch\n\nLista con detalles: [http://scikit-learn.org/stable/modules/clustering.html](http://scikit-learn.org/stable/modules/clustering.html)", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import preprocessing\n\n# Normalization of data\nX_scaled = preprocessing.scale(X)\n\n# Running the algorithm\nkmeans = KMeans(n_clusters=3)\nkmeans.fit(X_scaled)\npred_labels = kmeans.labels_\n\n# Evaluating the output\ncm = confusion_matrix(true_labels, pred_labels)\nprint(cm)", "[[ 0 59 0]\n [ 3 3 65]\n [48 0 0]]\n" ], [ "from sklearn.cluster import MiniBatchKMeans\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import preprocessing\n\n# Normalization of 
data\nX_scaled = preprocessing.scale(X)\n\n# Running the algorithm\nkmeans = MiniBatchKMeans(n_clusters=3)\nkmeans.fit(X_scaled)\npred_labels = kmeans.labels_\n\n# Evaluating the output\ncm = confusion_matrix(true_labels, pred_labels)\nprint(cm)", "[[ 0 0 59]\n [ 2 65 4]\n [48 0 0]]\n" ], [ "from sklearn.cluster import AffinityPropagation\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import preprocessing\n\n# Normalization of data\nX_scaled = preprocessing.scale(X)\n\n# Running the algorithm\nkmeans = AffinityPropagation(preference=-300)\nkmeans.fit(X_scaled)\npred_labels = kmeans.labels_\n\n# Evaluating the output\ncm = confusion_matrix(true_labels, pred_labels)\nprint(cm)", "[[49 10 0]\n [ 3 58 10]\n [ 2 0 46]]\n" ] ], [ [ "## 5- Clasificación\n#### Reconocimiento de dígitos\nLos datos se encuentran en 2 archivos, `data/optdigits.train` y `data/optdigits.test`. \n\nComo su nombre lo indica, el set `data/optdigits.train` contiene los ejemplos que deben ser usados para entrenar el modelo, mientras que el set `data/optdigits.test` se utilizará para obtener una estimación del error de predicción.\n\nAmbos archivos comparten el mismo formato: cada línea contiene 65 valores. Los 64 primeros corresponden a la representación de la imagen en escala de grises (0-blanco, 255-negro), y el valor 65 corresponde al dígito de la imágen (0-9).", "_____no_output_____" ], [ "## 5- Clasificación\n#### Cargando los datos\n\nPara cargar los datos, utilizamos np.loadtxt con los parámetros extra delimiter (para indicar que el separador será en esta ocasión una coma) y con el dype np.int8 (para que su representación en memoria sea la mínima posible, 8 bits en vez de 32/64 bits para un float).", "_____no_output_____" ] ], [ [ "import numpy as np\n\nXY_tv = np.loadtxt(\"data/optdigits.train\", delimiter=\",\", dtype=np.int8)\nprint(XY_tv)\nX_tv = XY_tv[:,:64]\nY_tv = XY_tv[:, 64]\n\nprint(X_tv.shape)\nprint(Y_tv.shape)\nprint(X_tv[0,:])\nprint(X_tv[0,:].reshape(8,8))\nprint(Y_tv[0])", "[[ 0 1 6 ... 0 0 0]\n [ 0 0 10 ... 0 0 0]\n [ 0 0 8 ... 0 0 7]\n ...\n [ 0 0 3 ... 0 0 6]\n [ 0 0 6 ... 5 0 6]\n [ 0 0 2 ... 0 0 7]]\n(3823, 64)\n(3823,)\n[ 0 1 6 15 12 1 0 0 0 7 16 6 6 10 0 0 0 8 16 2 0 11 2 0\n 0 5 16 3 0 5 7 0 0 7 13 3 0 8 7 0 0 4 12 0 1 13 5 0\n 0 0 14 9 15 9 0 0 0 0 6 14 7 1 0 0]\n[[ 0 1 6 15 12 1 0 0]\n [ 0 7 16 6 6 10 0 0]\n [ 0 8 16 2 0 11 2 0]\n [ 0 5 16 3 0 5 7 0]\n [ 0 7 13 3 0 8 7 0]\n [ 0 4 12 0 1 13 5 0]\n [ 0 0 14 9 15 9 0 0]\n [ 0 0 6 14 7 1 0 0]]\n0\n" ] ], [ [ "## 5- Clasificación\n#### Visualizando los datos\n\nPara visualizar los datos utilizaremos el método imshow de pyplot. Resulta necesario convertir el arreglo desde las dimensiones (1,64) a (8,8) para que la imagen sea cuadrada y pueda distinguirse el dígito. Superpondremos además el label correspondiente al dígito, mediante el método text. 
Realizaremos lo anterior para los primeros 25 datos del archivo.", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n\n# Well plot the first nx*ny examples\nnx, ny = 5, 5\nfig, ax = plt.subplots(nx, ny, figsize=(12,12))\nfor i in range(nx):\n for j in range(ny):\n index = j+ny*i\n data = X_tv[index,:].reshape(8,8)\n label = Y_tv[index]\n ax[i][j].imshow(data, interpolation='nearest', cmap=plt.get_cmap('gray_r'))\n ax[i][j].text(7, 0, str(int(label)), horizontalalignment='center',\n verticalalignment='center', fontsize=10, color='blue')\n ax[i][j].get_xaxis().set_visible(False)\n ax[i][j].get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "## 5- Clasificación\n#### Entrenamiento trivial\nPara clasificar utilizaremos el algoritmo K Nearest Neighbours.\n\nEntrenaremos el modelo con 1 vecino y verificaremos el error de predicción en el set de entrenamiento.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\n\nk = 1\nkNN = KNeighborsClassifier(n_neighbors=k)\nkNN.fit(X_tv, Y_tv)\nY_pred = kNN.predict(X_tv)\nn_errors = sum(Y_pred!=Y_tv)\nprint(\"Hay %d errores de un total de %d ejemplos de entrenamiento\" %(n_errors, len(Y_tv)))", "Hay 0 errores de un total de 3823 ejemplos de entrenamiento\n" ] ], [ [ "¡La mejor predicción del punto es el mismo punto! \n\nPero esto generalizaría catastróficamente.\n\nEs importantísimo **entrenar** en un set de datos y luego probar como generaliza/funciona en un set **completamente nuevo**.", "_____no_output_____" ], [ "## 5- Clasificación\n#### Seleccionando el número adecuado de vecinos\n\nBuscando el valor de k más apropiado\n\nA partir del análisis del punto anterior, nos damos cuenta de la necesidad de:\n1. Calcular el error en un set distinto al utilizado para entrenar.\n2. 
Calcular el mejor valor de vecinos para el algoritmo.\n\n(Esto tomará un tiempo)", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\n\ntemplate = \"k={0:,d}: {1:.1f} +- {2:.1f} errores de clasificación de un total de {3:,d} puntos\"\n# Fitting the model\nmean_error_for_k = []\nstd_error_for_k = []\nk_range = range(1,8)\nfor k in k_range:\n errors_k = []\n for i in range(10):\n kNN = KNeighborsClassifier(n_neighbors=k)\n X_train, X_valid, Y_train, Y_valid = train_test_split(X_tv, Y_tv, train_size=0.75)\n kNN.fit(X_train, Y_train)\n # Predicting values\n Y_valid_pred = kNN.predict(X_valid)\n # Count the errors\n n_errors = sum(Y_valid!=Y_valid_pred)\n # Add them to vector\n errors_k.append(100.*n_errors/len(Y_valid))\n\n errors = np.array(errors_k)\n print(template.format(k, errors.mean(), errors.std(), len(Y_valid)))\n mean_error_for_k.append(errors.mean())\n std_error_for_k.append(errors.std())", "k=1: 1.6 +- 0.3 errores de clasificación de un total de 956 puntos\nk=2: 2.3 +- 0.5 errores de clasificación de un total de 956 puntos\nk=3: 1.6 +- 0.3 errores de clasificación de un total de 956 puntos\nk=4: 2.0 +- 0.4 errores de clasificación de un total de 956 puntos\nk=5: 1.7 +- 0.2 errores de clasificación de un total de 956 puntos\nk=6: 1.8 +- 0.3 errores de clasificación de un total de 956 puntos\nk=7: 1.7 +- 0.4 errores de clasificación de un total de 956 puntos\n" ] ], [ [ "## 5- Clasificación\nPodemos visualizar los datos anteriores utilizando el siguiente código, que requiere que `sd_error_for k` y `mean_error_for_k` hayan sido apropiadamente definidos.", "_____no_output_____" ] ], [ [ "mean = np.array(mean_error_for_k)\nstd = np.array(std_error_for_k)\nplt.figure(figsize=(12,8))\nplt.plot(k_range, mean - std, \"k:\")\nplt.plot(k_range, mean , \"r.-\")\nplt.plot(k_range, mean + std, \"k:\")\nplt.xlabel(\"Numero de vecinos k\")\nplt.ylabel(\"Error de clasificacion\")\nplt.show()", "_____no_output_____" ] ], [ [ "## 5- Clasificación\n#### Entrenando todo el modelo\nA partir de lo anterior, se fija el número de vecinos $k=3$ y se procede a entrenar el modelo con todos los datos. ", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nk = 3\nkNN = KNeighborsClassifier(n_neighbors=k)\nkNN.fit(X_tv, Y_tv)", "_____no_output_____" ] ], [ [ "## 5- Clasificación\n#### Predicción en testing dataset\n\nAhora que el modelo kNN ha sido completamente entrenado, calcularemos el error de predicción en un set de datos completamente nuevo: el set de testing. 
", "_____no_output_____" ] ], [ [ "# Cargando el archivo data/optdigits.tes\nXY_test = np.loadtxt(\"data/optdigits.test\", delimiter=\",\")\nX_test = XY_test[:,:64]\nY_test = XY_test[:, 64]\n# Predicción de etiquetas\nY_pred = kNN.predict(X_test)", "_____no_output_____" ] ], [ [ "## 5- Clasificación\nPuesto que tenemos las etiquetas verdaderas en el set de entrenamiento, podemos visualizar que números han sido correctamente etiquetados.", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n\n# Mostrar los datos correctos\nmask = (Y_pred==Y_test)\nX_aux = X_test[mask]\nY_aux_true = Y_test[mask]\nY_aux_pred = Y_pred[mask]\n\n# We'll plot the first 100 examples, randomly choosen\nnx, ny = 5, 5\nfig, ax = plt.subplots(nx, ny, figsize=(12,12))\nfor i in range(nx):\n for j in range(ny):\n index = j+ny*i\n data = X_aux[index,:].reshape(8,8)\n label_pred = str(int(Y_aux_pred[index]))\n label_true = str(int(Y_aux_true[index]))\n ax[i][j].imshow(data, interpolation='nearest', cmap=plt.get_cmap('gray_r'))\n ax[i][j].text(0, 0, label_pred, horizontalalignment='center',\n verticalalignment='center', fontsize=10, color='green')\n ax[i][j].text(7, 0, label_true, horizontalalignment='center',\n verticalalignment='center', fontsize=10, color='blue')\n ax[i][j].get_xaxis().set_visible(False)\n ax[i][j].get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "## 5- Clasificación\n#### Visualización de etiquetas incorrectas\nMás interesante que el gráfico anterior, resulta considerar los casos donde los dígitos han sido incorrectamente etiquetados. ", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n\n# Mostrar los datos correctos\nmask = (Y_pred!=Y_test)\nX_aux = X_test[mask]\nY_aux_true = Y_test[mask]\nY_aux_pred = Y_pred[mask]\n\n# We'll plot the first 100 examples, randomly choosen\nnx, ny = 5, 5\nfig, ax = plt.subplots(nx, ny, figsize=(12,12))\nfor i in range(nx):\n for j in range(ny):\n index = j+ny*i\n data = X_aux[index,:].reshape(8,8)\n label_pred = str(int(Y_aux_pred[index]))\n label_true = str(int(Y_aux_true[index]))\n ax[i][j].imshow(data, interpolation='nearest', cmap=plt.get_cmap('gray_r'))\n ax[i][j].text(0, 0, label_pred, horizontalalignment='center',\n verticalalignment='center', fontsize=10, color='red')\n ax[i][j].text(7, 0, label_true, horizontalalignment='center',\n verticalalignment='center', fontsize=10, color='blue')\n ax[i][j].get_xaxis().set_visible(False)\n ax[i][j].get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "## 5- Clasificación\n#### Análisis del error\n\nDespués de la exploración visual de los resultados, queremos obtener el error de predicción real del modelo.\n\n¿Existen dígitos más fáciles o difíciles de clasificar?", "_____no_output_____" ] ], [ [ "# Error global\nmask = (Y_pred!=Y_test)\nerror_prediccion = 100.*sum(mask) / len(mask)\nprint(\"Error de predicción total de {0:.1f} %\".format(error_prediccion))\n\nfor digito in range(0,10):\n mask_digito = Y_test==digito\n Y_test_digito = Y_test[mask_digito] \n Y_pred_digito = Y_pred[mask_digito]\n mask = Y_test_digito!=Y_pred_digito\n error_prediccion = 100.*sum((Y_pred_digito!=Y_test_digito)) / len(Y_pred_digito)\n print(\"Error de predicción para digito {0:d} de {1:.1f} %\".format(digito, error_prediccion))", "Error de predicción total de 2.2 %\nError de predicción para digito 0 de 0.0 %\nError de predicción para digito 1 de 1.1 %\nError de predicción para digito 2 de 2.3 %\nError de predicción para digito 3 de 1.1 %\nError de 
predicción para digito 4 de 1.7 %\nError de predicción para digito 5 de 1.6 %\nError de predicción para digito 6 de 0.0 %\nError de predicción para digito 7 de 3.9 %\nError de predicción para digito 8 de 6.9 %\nError de predicción para digito 9 de 3.3 %\n" ] ], [ [ "## 5- Clasificación\n#### Análisis del error (cont. de)\n\nEl siguiente código muestra el error de clasificación, permitiendo verificar que números son confundibles", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix as cm\ncm = cm(Y_test, Y_pred)\nprint(cm)", "[[178 0 0 0 0 0 0 0 0 0]\n [ 0 180 0 0 0 0 1 0 1 0]\n [ 0 4 173 0 0 0 0 0 0 0]\n [ 0 0 0 181 0 0 0 1 1 0]\n [ 0 2 0 0 178 0 0 0 1 0]\n [ 0 0 0 1 1 179 0 0 0 1]\n [ 0 0 0 0 0 0 181 0 0 0]\n [ 0 0 0 0 0 0 0 172 1 6]\n [ 0 9 0 1 0 0 0 0 162 2]\n [ 0 0 0 4 0 1 0 0 1 174]]\n" ], [ "# As in http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\ndef plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.jet):\n plt.figure(figsize=(10,10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(10)\n plt.xticks(tick_marks, tick_marks)\n plt.yticks(tick_marks, tick_marks)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return None\n\n# Compute confusion matrix\nplt.figure()\nplot_confusion_matrix(cm)", "_____no_output_____" ], [ "# Normalize the confusion matrix by row (i.e by the number of samples in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nplot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')", "_____no_output_____" ] ], [ [ "## 5- Clasificación\nA partir de lo anterior, vemos observamos que los mayores errores son:\n* El 2 puede clasificarse erróneamente como 1 (pero no viceversa).\n* El 7 puede clasificarse erróneamente como 9 (pero no viceversa).\n* El 8 puede clasificarse erróneamente como 1 (pero no viceversa).\n* El 9 puede clasificarse erróneamente como 3 (pero no viceversa).", "_____no_output_____" ], [ "## 5- Clasificación\n#### Preguntas\n¿Es éste el mejor método de clasificación? ¿Qué otros métodos pueden utilizarse?", "_____no_output_____" ], [ "Múltiples familias de algoritmos:\n* Logistic Regression\n* Naive Bayes\n* Decision Trees\n* Random Forests\n* Support Vector Machines\n* Neural Networks\n* Etc etc\n\nlink: [http://scikit-learn.org/stable/supervised_learning.html](http://scikit-learn.org/stable/supervised_learning.html)", "_____no_output_____" ], [ "## 5- Conclusión\n\n\nSklearn tiene muchos algoritmos implementados y es fácil de usar.\n\nSin embargo, hay que tener presente GIGO: Garbage In, Garbage Out:\n* Exploración y visualización inicial de datos.\n* Limpieza de datos\n* Utilización del algoritmo requiere conocer su funconamiento para mejor tuneo de parámetros.\n* Es bueno y fácil probar más de un algoritmo. ", "_____no_output_____" ], [ "## 5- Conclusión\n\n\nY por último:\n\n* Aplicación de algoritmos de ML es delicado porque requiere (1) conocer bien los datos y (2) entender las limitaciones del algoritmo. \n* Considerar siempre una muestra para entrenamiento y una muestra para testeo: predicción es inútil si no se entrega un margen de error para la predicción.\n\n", "_____no_output_____" ], [ "## 5- Conclusión\n\n\n# ¡Gracias!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
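The clustering part of the notebook record above estimates the number of clusters by summing point-to-centroid distances by hand. The sketch below shows the same elbow idea using the `inertia_` attribute of a fitted `KMeans` (sum of squared distances to the closest centroid); it is an illustrative alternative rather than part of the original notebook, and the synthetic blobs from `make_blobs` stand in for the scaled wine measurements.

```python
# Illustrative elbow-rule sketch using KMeans.inertia_ instead of the manual
# distance loop in the notebook above. Synthetic blobs stand in for the wine data.
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.preprocessing import scale

X, _ = make_blobs(n_samples=300, centers=3, n_features=5, random_state=0)
X_scaled = scale(X)  # scaling, as in the notebook, keeps features comparable

cluster_range = range(2, 10)
inertias = []
for n_clusters in cluster_range:
    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit(X_scaled)
    inertias.append(kmeans.inertia_)

plt.plot(list(cluster_range), inertias, 'rs-')
plt.xlabel('Number of clusters')
plt.ylabel('Sum of squared distances (inertia)')
plt.show()
```

The notebook's manual version sums plain (unsquared) distances, but the resulting elbow is read the same way: the curve flattens sharply once the number of clusters matches the structure in the data.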
d05b2dc51713f83b3a24a4254b0f8a3caf00dcd1
179,200
ipynb
Jupyter Notebook
legacy/arkady TF legacy/TF_2020_course4_week4_Answer.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
1
2020-10-23T06:02:41.000Z
2020-10-23T06:02:41.000Z
legacy/arkady TF legacy/TF_2020_course4_week4_Answer.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
null
null
null
legacy/arkady TF legacy/TF_2020_course4_week4_Answer.ipynb
21kc-caracol/Acoustic_data_Image_vs_Mean
96801c0dd5c47859086c8b6f145a61333575d9b6
[ "MIT" ]
null
null
null
204.56621
83,144
0.844727
[ [ [ "import tensorflow as tf\nprint(tf.__version__)", "2.0.0\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\ndef plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)", "_____no_output_____" ], [ "#!wget --no-check-certificate \\\n# https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv \\\n# -O /tmp/daily-min-temperatures.csv", "_____no_output_____" ], [ "root = r'D:\\Users\\Arkady\\Verint\\Coursera_2019_Tensorflow_Specialization\\Course4_Sequences_TimeSeries_Prediction'\nsrcurl = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'", "_____no_output_____" ], [ "#import pandas as pd\n\n#df = pd.read_csv(srcurl)\n#df.to_csv(root + '/tmp/daily-min-temperatures.csv')", "_____no_output_____" ], [ "import csv\ntime_step = []\ntemps = []\n\nwith open(root + '/tmp/daily-min-temperatures.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n next(reader)\n step=0\n for row in reader:\n temps.append(float(row[2]))\n time_step.append(step)\n step = step + 1\n\nseries = np.array(temps)\ntime = np.array(time_step)\nplt.figure(figsize=(10, 6))\nplot_series(time, series)", "_____no_output_____" ], [ "split_time = 2500\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\n\nwindow_size = 30\nbatch_size = 32\nshuffle_buffer_size = 1000\n\n", "_____no_output_____" ], [ "def windowed_dataset(series, window_size, batch_size, shuffle_buffer):\n series = tf.expand_dims(series, axis=-1)\n ds = tf.data.Dataset.from_tensor_slices(series)\n ds = ds.window(window_size + 1, shift=1, drop_remainder=True)\n ds = ds.flat_map(lambda w: w.batch(window_size + 1))\n ds = ds.shuffle(shuffle_buffer)\n ds = ds.map(lambda w: (w[:-1], w[1:]))\n return ds.batch(batch_size).prefetch(1)", "_____no_output_____" ], [ "def model_forecast(model, series, window_size):\n ds = tf.data.Dataset.from_tensor_slices(series)\n ds = ds.window(window_size, shift=1, drop_remainder=True)\n ds = ds.flat_map(lambda w: w.batch(window_size))\n ds = ds.batch(32).prefetch(1)\n forecast = model.predict(ds)\n return forecast", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\ntf.random.set_seed(51)\nnp.random.seed(51)\nwindow_size = 64\nbatch_size = 256\ntrain_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\nprint(train_set)\nprint(x_train.shape)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv1D(filters=32, kernel_size=5,\n strides=1, padding=\"causal\",\n activation=\"relu\",\n input_shape=[None, 1]),\n tf.keras.layers.LSTM(64, return_sequences=True),\n tf.keras.layers.LSTM(64, return_sequences=True),\n tf.keras.layers.Dense(30, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 400)\n])\n\nlr_schedule = tf.keras.callbacks.LearningRateScheduler(\n lambda epoch: 1e-8 * 10**(epoch / 20))\noptimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)\nmodel.compile(loss=tf.keras.losses.Huber(),\n optimizer=optimizer,\n metrics=[\"mae\"])\nhistory = model.fit(train_set, epochs=100, callbacks=[lr_schedule])\n\n\n", "<PrefetchDataset shapes: ((None, None, 1), (None, None, 1)), types: (tf.float64, tf.float64)>\n(2500,)\nEpoch 1/100\n10/10 [==============================] - 6s 631ms/step - loss: 31.1549 - mae: 31.6551\nEpoch 
2/100\n10/10 [==============================] - 2s 229ms/step - loss: 30.5753 - mae: 31.0783\nEpoch 3/100\n10/10 [==============================] - 2s 225ms/step - loss: 29.7469 - mae: 30.1792\nEpoch 4/100\n10/10 [==============================] - 2s 232ms/step - loss: 28.5816 - mae: 29.0582\nEpoch 5/100\n10/10 [==============================] - 2s 238ms/step - loss: 27.1349 - mae: 27.6970\nEpoch 6/100\n10/10 [==============================] - 2s 236ms/step - loss: 25.5287 - mae: 25.9975\nEpoch 7/100\n10/10 [==============================] - 2s 226ms/step - loss: 23.3412 - mae: 23.8407\nEpoch 8/100\n10/10 [==============================] - 2s 229ms/step - loss: 20.6980 - mae: 21.1120\nEpoch 9/100\n10/10 [==============================] - 2s 223ms/step - loss: 17.4437 - mae: 17.8083\nEpoch 10/100\n10/10 [==============================] - 2s 227ms/step - loss: 13.7512 - mae: 14.1192\nEpoch 11/100\n10/10 [==============================] - 2s 226ms/step - loss: 10.3053 - mae: 10.6376\nEpoch 12/100\n10/10 [==============================] - 2s 245ms/step - loss: 7.6534 - mae: 8.0748\nEpoch 13/100\n10/10 [==============================] - 2s 231ms/step - loss: 6.3056 - mae: 6.7677\nEpoch 14/100\n10/10 [==============================] - 2s 236ms/step - loss: 5.7424 - mae: 6.1923\nEpoch 15/100\n10/10 [==============================] - 2s 234ms/step - loss: 5.3617 - mae: 5.8193\nEpoch 16/100\n10/10 [==============================] - 2s 244ms/step - loss: 4.9649 - mae: 5.4211\nEpoch 17/100\n10/10 [==============================] - 2s 228ms/step - loss: 4.5722 - mae: 5.0347\nEpoch 18/100\n10/10 [==============================] - 2s 226ms/step - loss: 4.2446 - mae: 4.7072\nEpoch 19/100\n10/10 [==============================] - 2s 233ms/step - loss: 3.9757 - mae: 4.4370\nEpoch 20/100\n10/10 [==============================] - 2s 233ms/step - loss: 3.7648 - mae: 4.2190\nEpoch 21/100\n10/10 [==============================] - 2s 229ms/step - loss: 3.5954 - mae: 4.0531\nEpoch 22/100\n10/10 [==============================] - 2s 230ms/step - loss: 3.4795 - mae: 3.9356\nEpoch 23/100\n10/10 [==============================] - 2s 230ms/step - loss: 3.3746 - mae: 3.8415\nEpoch 24/100\n10/10 [==============================] - 2s 238ms/step - loss: 3.3009 - mae: 3.7655\nEpoch 25/100\n10/10 [==============================] - 2s 246ms/step - loss: 3.2372 - mae: 3.6977\nEpoch 26/100\n10/10 [==============================] - 2s 232ms/step - loss: 3.1713 - mae: 3.6339\nEpoch 27/100\n10/10 [==============================] - 2s 245ms/step - loss: 3.1150 - mae: 3.5704\nEpoch 28/100\n10/10 [==============================] - 2s 236ms/step - loss: 3.0381 - mae: 3.5040\nEpoch 29/100\n10/10 [==============================] - 2s 245ms/step - loss: 2.9743 - mae: 3.4380\nEpoch 30/100\n10/10 [==============================] - 2s 224ms/step - loss: 2.9115 - mae: 3.3710\nEpoch 31/100\n10/10 [==============================] - 2s 228ms/step - loss: 2.8481 - mae: 3.3076\nEpoch 32/100\n10/10 [==============================] - 2s 223ms/step - loss: 2.7903 - mae: 3.2493\nEpoch 33/100\n10/10 [==============================] - 2s 229ms/step - loss: 2.7308 - mae: 3.1916\nEpoch 34/100\n10/10 [==============================] - 2s 231ms/step - loss: 2.6765 - mae: 3.1348\nEpoch 35/100\n10/10 [==============================] - 2s 228ms/step - loss: 2.6279 - mae: 3.0869\nEpoch 36/100\n10/10 [==============================] - 2s 226ms/step - loss: 2.5779 - mae: 3.0360\nEpoch 37/100\n10/10 [==============================] - 2s 221ms/step - loss: 2.5291 
- mae: 2.9877\nEpoch 38/100\n10/10 [==============================] - 2s 222ms/step - loss: 2.4822 - mae: 2.9408\nEpoch 39/100\n10/10 [==============================] - 2s 221ms/step - loss: 2.4446 - mae: 2.8970\nEpoch 40/100\n10/10 [==============================] - 2s 238ms/step - loss: 2.3994 - mae: 2.8538\nEpoch 41/100\n10/10 [==============================] - 3s 255ms/step - loss: 2.3590 - mae: 2.8118\nEpoch 42/100\n10/10 [==============================] - 2s 246ms/step - loss: 2.3189 - mae: 2.7700\nEpoch 43/100\n10/10 [==============================] - 2s 226ms/step - loss: 2.2793 - mae: 2.7302\nEpoch 44/100\n10/10 [==============================] - 2s 235ms/step - loss: 2.2343 - mae: 2.6897\nEpoch 45/100\n10/10 [==============================] - 2s 236ms/step - loss: 2.2012 - mae: 2.6534\nEpoch 46/100\n10/10 [==============================] - 2s 237ms/step - loss: 2.1663 - mae: 2.6186\nEpoch 47/100\n10/10 [==============================] - 2s 226ms/step - loss: 2.1309 - mae: 2.5873\nEpoch 48/100\n10/10 [==============================] - 2s 228ms/step - loss: 2.1043 - mae: 2.5580\nEpoch 49/100\n10/10 [==============================] - 2s 230ms/step - loss: 2.0843 - mae: 2.5342\nEpoch 50/100\n10/10 [==============================] - 2s 235ms/step - loss: 2.0535 - mae: 2.5051\nEpoch 51/100\n10/10 [==============================] - 2s 230ms/step - loss: 2.0280 - mae: 2.4808\nEpoch 52/100\n10/10 [==============================] - 2s 232ms/step - loss: 2.0094 - mae: 2.4607\nEpoch 53/100\n10/10 [==============================] - 2s 229ms/step - loss: 1.9883 - mae: 2.4382\nEpoch 54/100\n10/10 [==============================] - 2s 227ms/step - loss: 1.9709 - mae: 2.4230\nEpoch 55/100\n10/10 [==============================] - 2s 232ms/step - loss: 1.9475 - mae: 2.3983\nEpoch 56/100\n10/10 [==============================] - 2s 239ms/step - loss: 1.9256 - mae: 2.3799\nEpoch 57/100\n10/10 [==============================] - 3s 256ms/step - loss: 1.9069 - mae: 2.3570\nEpoch 58/100\n10/10 [==============================] - 2s 233ms/step - loss: 1.8953 - mae: 2.3410\nEpoch 59/100\n10/10 [==============================] - 2s 228ms/step - loss: 1.9060 - mae: 2.3603\nEpoch 60/100\n10/10 [==============================] - 2s 221ms/step - loss: 2.2724 - mae: 2.7229\nEpoch 61/100\n10/10 [==============================] - 2s 221ms/step - loss: 2.7288 - mae: 3.1923\nEpoch 62/100\n10/10 [==============================] - 2s 236ms/step - loss: 3.0613 - mae: 3.5175\nEpoch 63/100\n10/10 [==============================] - 2s 233ms/step - loss: 3.4700 - mae: 3.9729\nEpoch 64/100\n10/10 [==============================] - 2s 225ms/step - loss: 3.7631 - mae: 4.2854\nEpoch 65/100\n10/10 [==============================] - 2s 230ms/step - loss: 4.0173 - mae: 4.5429\nEpoch 66/100\n10/10 [==============================] - 2s 225ms/step - loss: 4.3317 - mae: 4.8017\nEpoch 67/100\n10/10 [==============================] - 2s 225ms/step - loss: 4.6335 - mae: 5.0796\nEpoch 68/100\n10/10 [==============================] - 2s 247ms/step - loss: 4.9020 - mae: 5.3453\nEpoch 69/100\n10/10 [==============================] - 2s 238ms/step - loss: 4.9740 - mae: 5.3445\nEpoch 70/100\n10/10 [==============================] - 2s 240ms/step - loss: 7.6903 - mae: 8.3992\nEpoch 71/100\n10/10 [==============================] - 2s 227ms/step - loss: 10.9064 - mae: 11.3023\nEpoch 72/100\n10/10 [==============================] - 2s 221ms/step - loss: 9.6644 - mae: 10.4074\nEpoch 73/100\n10/10 [==============================] - 2s 220ms/step - 
loss: 10.0171 - mae: 10.2698\nEpoch 74/100\n10/10 [==============================] - 2s 221ms/step - loss: 3.9833 - mae: 4.4559\nEpoch 75/100\n10/10 [==============================] - 2s 221ms/step - loss: 3.0147 - mae: 3.4834\nEpoch 76/100\n10/10 [==============================] - 2s 221ms/step - loss: 3.2445 - mae: 3.7338\nEpoch 77/100\n10/10 [==============================] - 2s 222ms/step - loss: 2.9902 - mae: 3.4611\nEpoch 78/100\n10/10 [==============================] - 2s 220ms/step - loss: 2.6910 - mae: 3.1329\nEpoch 79/100\n10/10 [==============================] - 2s 222ms/step - loss: 2.2900 - mae: 2.7348\nEpoch 80/100\n10/10 [==============================] - 2s 223ms/step - loss: 2.4437 - mae: 2.8806\nEpoch 81/100\n10/10 [==============================] - 2s 227ms/step - loss: 2.9893 - mae: 3.4827\nEpoch 82/100\n10/10 [==============================] - 2s 228ms/step - loss: 3.8285 - mae: 4.3233\nEpoch 83/100\n10/10 [==============================] - 2s 226ms/step - loss: 4.5597 - mae: 5.0344\nEpoch 84/100\n10/10 [==============================] - 3s 253ms/step - loss: 5.2008 - mae: 5.6670\nEpoch 85/100\n10/10 [==============================] - 2s 227ms/step - loss: 5.9173 - mae: 6.4142\nEpoch 86/100\n10/10 [==============================] - 2s 231ms/step - loss: 6.5364 - mae: 6.9143\nEpoch 87/100\n10/10 [==============================] - 2s 227ms/step - loss: 7.8768 - mae: 8.4254\nEpoch 88/100\n10/10 [==============================] - 2s 225ms/step - loss: 8.5242 - mae: 9.1338\nEpoch 89/100\n10/10 [==============================] - 2s 225ms/step - loss: 9.6508 - mae: 10.2404\nEpoch 90/100\n10/10 [==============================] - 2s 228ms/step - loss: 10.9035 - mae: 11.6384\nEpoch 91/100\n10/10 [==============================] - 2s 238ms/step - loss: 39.6707 - mae: 41.9910\nEpoch 92/100\n10/10 [==============================] - 2s 244ms/step - loss: 90.5707 - mae: 89.0393\nEpoch 93/100\n10/10 [==============================] - 2s 231ms/step - loss: 44.4263 - mae: 47.5359\nEpoch 94/100\n10/10 [==============================] - 2s 227ms/step - loss: 91.8916 - mae: 89.2918\nEpoch 95/100\n10/10 [==============================] - 2s 227ms/step - loss: 129.1897 - mae: 131.1622\nEpoch 96/100\n10/10 [==============================] - 2s 225ms/step - loss: 138.6994 - mae: 133.8414\nEpoch 97/100\n10/10 [==============================] - 2s 228ms/step - loss: 170.2142 - mae: 168.2265\nEpoch 98/100\n10/10 [==============================] - 2s 226ms/step - loss: 79.1734 - mae: 89.0613\nEpoch 99/100\n10/10 [==============================] - 2s 237ms/step - loss: 191.5171 - mae: 183.6914\nEpoch 100/100\n10/10 [==============================] - 2s 228ms/step - loss: 69.0531 - mae: 70.9478\n" ], [ "plt.semilogx(history.history[\"lr\"], history.history[\"loss\"])\nplt.axis([1e-8, 1e-4, 0, 60])", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\ntf.random.set_seed(51)\nnp.random.seed(51)\ntrain_set = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv1D(filters=60, kernel_size=5,\n strides=1, padding=\"causal\",\n activation=\"relu\",\n input_shape=[None, 1]),\n tf.keras.layers.LSTM(60, return_sequences=True),\n tf.keras.layers.LSTM(60, return_sequences=True),\n tf.keras.layers.Dense(30, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 400)\n])\n\n\noptimizer = tf.keras.optimizers.SGD(lr=1e-5, 
momentum=0.9)\nmodel.compile(loss=tf.keras.losses.Huber(),\n optimizer=optimizer,\n metrics=[\"mae\"])\nhistory = model.fit(train_set,epochs=150)", "Epoch 1/150\n25/25 [==============================] - 6s 240ms/step - loss: 9.9624 - mae: 10.5789\nEpoch 2/150\n25/25 [==============================] - 3s 109ms/step - loss: 2.5933 - mae: 3.0493\nEpoch 3/150\n25/25 [==============================] - 3s 106ms/step - loss: 1.9328 - mae: 2.3879\nEpoch 4/150\n25/25 [==============================] - 3s 106ms/step - loss: 1.8620 - mae: 2.3160\nEpoch 5/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.8200 - mae: 2.2715\nEpoch 6/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.7918 - mae: 2.2419\nEpoch 7/150\n25/25 [==============================] - 3s 112ms/step - loss: 1.7631 - mae: 2.2129\nEpoch 8/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.7460 - mae: 2.1938\nEpoch 9/150\n25/25 [==============================] - 3s 116ms/step - loss: 1.7219 - mae: 2.1688\nEpoch 10/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.7014 - mae: 2.1483\nEpoch 11/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.6838 - mae: 2.1305\nEpoch 12/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.6662 - mae: 2.1134\nEpoch 13/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.6559 - mae: 2.1020\nEpoch 14/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.6440 - mae: 2.0905\nEpoch 15/150\n25/25 [==============================] - 3s 123ms/step - loss: 1.6326 - mae: 2.0777\nEpoch 16/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.6254 - mae: 2.0699\nEpoch 17/150\n25/25 [==============================] - 3s 117ms/step - loss: 1.6170 - mae: 2.0609\nEpoch 18/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.6078 - mae: 2.0499\nEpoch 19/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.6057 - mae: 2.0479\nEpoch 20/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5959 - mae: 2.0381\nEpoch 21/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5887 - mae: 2.0310\nEpoch 22/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5871 - mae: 2.0295\nEpoch 23/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5821 - mae: 2.0245\nEpoch 24/150\n25/25 [==============================] - 3s 112ms/step - loss: 1.5777 - mae: 2.0204\nEpoch 25/150\n25/25 [==============================] - 3s 125ms/step - loss: 1.5784 - mae: 2.0192\nEpoch 26/150\n25/25 [==============================] - 3s 112ms/step - loss: 1.5675 - mae: 2.0094\nEpoch 27/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5660 - mae: 2.0062\nEpoch 28/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5617 - mae: 2.0037\nEpoch 29/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5587 - mae: 1.9994\nEpoch 30/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.5572 - mae: 1.9976\nEpoch 31/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5547 - mae: 1.9960\nEpoch 32/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5533 - mae: 1.9950\nEpoch 33/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5506 - mae: 1.9926\nEpoch 34/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.5485 - mae: 1.9888\nEpoch 35/150\n25/25 
[==============================] - 3s 115ms/step - loss: 1.5520 - mae: 1.9935\nEpoch 36/150\n25/25 [==============================] - 3s 112ms/step - loss: 1.5489 - mae: 1.9900\nEpoch 37/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5433 - mae: 1.9840\nEpoch 38/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5417 - mae: 1.9830\nEpoch 39/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5394 - mae: 1.9812\nEpoch 40/150\n25/25 [==============================] - 3s 122ms/step - loss: 1.5409 - mae: 1.9820\nEpoch 41/150\n25/25 [==============================] - 3s 115ms/step - loss: 1.5405 - mae: 1.9820\nEpoch 42/150\n25/25 [==============================] - 3s 117ms/step - loss: 1.5362 - mae: 1.9768\nEpoch 43/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5349 - mae: 1.9770\nEpoch 44/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5357 - mae: 1.9767\nEpoch 45/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5357 - mae: 1.9783\nEpoch 46/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.5315 - mae: 1.9721\nEpoch 47/150\n25/25 [==============================] - 3s 115ms/step - loss: 1.5334 - mae: 1.9754\nEpoch 48/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.5286 - mae: 1.9701\nEpoch 49/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5309 - mae: 1.9728\nEpoch 50/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5312 - mae: 1.9723\nEpoch 51/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5273 - mae: 1.9682\nEpoch 52/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5296 - mae: 1.9709\nEpoch 53/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5282 - mae: 1.9696\nEpoch 54/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5288 - mae: 1.9710\nEpoch 55/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5234 - mae: 1.9662\nEpoch 56/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5241 - mae: 1.9660\nEpoch 57/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5219 - mae: 1.9636\nEpoch 58/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.5242 - mae: 1.9644\nEpoch 59/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5345 - mae: 1.9768\nEpoch 60/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5268 - mae: 1.9676\nEpoch 61/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.5206 - mae: 1.9636\nEpoch 62/150\n25/25 [==============================] - 3s 106ms/step - loss: 1.5190 - mae: 1.9604\nEpoch 63/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5192 - mae: 1.9614\nEpoch 64/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5185 - mae: 1.9587\nEpoch 65/150\n25/25 [==============================] - 3s 116ms/step - loss: 1.5167 - mae: 1.9590\nEpoch 66/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5182 - mae: 1.9590\nEpoch 67/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5161 - mae: 1.9583\nEpoch 68/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.5216 - mae: 1.9642\nEpoch 69/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5161 - mae: 1.9576\nEpoch 70/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.5161 - mae: 1.9560\nEpoch 
71/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5147 - mae: 1.9559\nEpoch 72/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5134 - mae: 1.9546\nEpoch 73/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5114 - mae: 1.9538\nEpoch 74/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5162 - mae: 1.9571\nEpoch 75/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5099 - mae: 1.9524\nEpoch 76/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5129 - mae: 1.9538\nEpoch 77/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5107 - mae: 1.9524\nEpoch 78/150\n25/25 [==============================] - 3s 112ms/step - loss: 1.5103 - mae: 1.9515\nEpoch 79/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.5129 - mae: 1.9540\nEpoch 80/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5076 - mae: 1.9484\nEpoch 81/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5090 - mae: 1.9504\nEpoch 82/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5116 - mae: 1.9531\nEpoch 83/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.5061 - mae: 1.9467\nEpoch 84/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5054 - mae: 1.9465\nEpoch 85/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5063 - mae: 1.9470\nEpoch 86/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5046 - mae: 1.9453\nEpoch 87/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.5034 - mae: 1.9444\nEpoch 88/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5037 - mae: 1.9452\nEpoch 89/150\n25/25 [==============================] - 3s 113ms/step - loss: 1.5030 - mae: 1.9432\nEpoch 90/150\n25/25 [==============================] - 3s 116ms/step - loss: 1.5012 - mae: 1.9426\nEpoch 91/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5033 - mae: 1.9441\nEpoch 92/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.5024 - mae: 1.9430\nEpoch 93/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4997 - mae: 1.9402\nEpoch 94/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4986 - mae: 1.9391\nEpoch 95/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4972 - mae: 1.9375\nEpoch 96/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.5060 - mae: 1.9462\nEpoch 97/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4984 - mae: 1.9381\nEpoch 98/150\n25/25 [==============================] - 3s 106ms/step - loss: 1.4985 - mae: 1.9391\nEpoch 99/150\n25/25 [==============================] - 3s 111ms/step - loss: 1.4981 - mae: 1.9389\nEpoch 100/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4943 - mae: 1.9354\nEpoch 101/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4951 - mae: 1.9351\nEpoch 102/150\n25/25 [==============================] - 3s 106ms/step - loss: 1.4953 - mae: 1.9356\nEpoch 103/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4923 - mae: 1.9320\nEpoch 104/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4919 - mae: 1.9317\nEpoch 105/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4932 - mae: 1.9333\nEpoch 106/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4916 - 
mae: 1.9316\nEpoch 107/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4912 - mae: 1.9318\nEpoch 108/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4930 - mae: 1.9331\nEpoch 109/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4899 - mae: 1.9313\nEpoch 110/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.4908 - mae: 1.9308\nEpoch 111/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4909 - mae: 1.9310\nEpoch 112/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4904 - mae: 1.9299\nEpoch 113/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4879 - mae: 1.9281\nEpoch 114/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4869 - mae: 1.9276\nEpoch 115/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4876 - mae: 1.9273\nEpoch 116/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.4887 - mae: 1.9274\nEpoch 117/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.4887 - mae: 1.9292\nEpoch 118/150\n25/25 [==============================] - 3s 114ms/step - loss: 1.4867 - mae: 1.9285\nEpoch 119/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4871 - mae: 1.9271\nEpoch 120/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4861 - mae: 1.9260\nEpoch 121/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4871 - mae: 1.9264\nEpoch 122/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4904 - mae: 1.9311\nEpoch 123/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4886 - mae: 1.9284\nEpoch 124/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4915 - mae: 1.9321\nEpoch 125/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4867 - mae: 1.9260\nEpoch 126/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4862 - mae: 1.9264\nEpoch 127/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4861 - mae: 1.9255\nEpoch 128/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4846 - mae: 1.9245\nEpoch 129/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4838 - mae: 1.9235\nEpoch 130/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4820 - mae: 1.9234\nEpoch 131/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4868 - mae: 1.9279\nEpoch 132/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4829 - mae: 1.9230\nEpoch 133/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4916 - mae: 1.9312\nEpoch 134/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4883 - mae: 1.9279\nEpoch 135/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4889 - mae: 1.9290\nEpoch 136/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4811 - mae: 1.9216\nEpoch 137/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4825 - mae: 1.9222\nEpoch 138/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4855 - mae: 1.9253\nEpoch 139/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4836 - mae: 1.9229\nEpoch 140/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4831 - mae: 1.9212\nEpoch 141/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4835 - mae: 1.9227\nEpoch 142/150\n25/25 
[==============================] - 3s 109ms/step - loss: 1.4800 - mae: 1.9203\nEpoch 143/150\n25/25 [==============================] - 3s 109ms/step - loss: 1.4789 - mae: 1.9195\nEpoch 144/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4831 - mae: 1.9230\nEpoch 145/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.4839 - mae: 1.9234\nEpoch 146/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4791 - mae: 1.9188\nEpoch 147/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4835 - mae: 1.9231\nEpoch 148/150\n25/25 [==============================] - 3s 107ms/step - loss: 1.4799 - mae: 1.9193\nEpoch 149/150\n25/25 [==============================] - 3s 108ms/step - loss: 1.4791 - mae: 1.9187\nEpoch 150/150\n25/25 [==============================] - 3s 110ms/step - loss: 1.4790 - mae: 1.9193\n" ], [ "rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)\nrnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]", "_____no_output_____" ], [ "plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, rnn_forecast)", "_____no_output_____" ], [ "tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()", "_____no_output_____" ], [ "print(rnn_forecast)", "[11.66933 11.05092 12.271348 ... 13.672548 13.805385 15.023049]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05b318cd85bd5625378138c48e28a7941d2fa14
885,693
ipynb
Jupyter Notebook
hat11/db_h11_hd110833.ipynb
bmorris3/freckles
057a23b34d760b712c26b47e8734312243639e78
[ "MIT" ]
1
2021-09-03T15:19:23.000Z
2021-09-03T15:19:23.000Z
hat11/db_h11_hd110833.ipynb
bmorris3/freckles
057a23b34d760b712c26b47e8734312243639e78
[ "MIT" ]
null
null
null
hat11/db_h11_hd110833.ipynb
bmorris3/freckles
057a23b34d760b712c26b47e8734312243639e78
[ "MIT" ]
null
null
null
1,559.318662
601,788
0.958603
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "import h5py\n\narchive = h5py.File('/Users/bmmorris/git/aesop/notebooks/spectra.hdf5', 'r+')", "_____no_output_____" ], [ "targets = list(archive)", "_____no_output_____" ], [ "list(archive['HD122120'])#['2017-09-11T03:27:13.140']['flux'][:]", "_____no_output_____" ], [ "from scipy.ndimage import gaussian_filter1d\n\nspectrum1 = archive['HATP11']['2017-06-12T07:28:06.310'] # K4\nspectrum2 = archive['HD110833']['2017-03-17T05:47:24.899'] # K3\nspectrum3 = archive['HD122120']['2017-06-15T03:52:13.690'] # K5\n\nwavelength1 = spectrum1['wavelength'][:]\nflux1 = spectrum1['flux'][:]\n\nwavelength2 = spectrum2['wavelength'][:]\nflux2 = spectrum2['flux'][:]\n\nwavelength3 = spectrum3['wavelength'][:]\nflux3 = spectrum3['flux'][:]\n\nplt.plot(wavelength1, flux1)\n\nplt.plot(wavelength2, gaussian_filter1d(flux2, 1))# + 0.2)\n\nplt.plot(wavelength3, gaussian_filter1d(flux3, 1))# + 0.4)\n\nplt.ylim([0.5, 1.1])\n#plt.xlim([3900, 4000])\n# plt.xlim([7035, 7075])\nplt.xlim([8850, 8890])", "_____no_output_____" ], [ "import sys\nsys.path.insert(0, '../')\nfrom toolkit import SimpleSpectrum", "_____no_output_____" ], [ "import astropy.units as u\n\ntarget = SimpleSpectrum(wavelength1, flux1, dispersion_unit=u.Angstrom)\nsource1 = SimpleSpectrum(wavelength2, flux2, dispersion_unit=u.Angstrom)\nsource2 = SimpleSpectrum(wavelength3, flux3, dispersion_unit=u.Angstrom)", "_____no_output_____" ], [ "from toolkit import instr_model\n\nfrom toolkit import slice_spectrum, concatenate_spectra, bands_TiO\nspec_band = []\n\nfirst_n_bands = 5\nwidth = 5\n\nfor band in bands_TiO[:first_n_bands]:\n target_slice = slice_spectrum(target, band.min-width*u.Angstrom, band.max+width*u.Angstrom)\n target_slice.flux /= target_slice.flux.max()\n spec_band.append(target_slice)\n\ntarget_slices = concatenate_spectra(spec_band)\ntarget_slices.plot(color='k', lw=2, marker='.')\n\nspec_band = []\nfor band, inds in zip(bands_TiO[:first_n_bands], target_slices.wavelength_splits):\n target_slice = slice_spectrum(source1, band.min-width*u.Angstrom, band.max+width*u.Angstrom, \n force_length=abs(np.diff(inds))[0])\n target_slice.flux /= target_slice.flux.max()\n spec_band.append(target_slice)\n\nsource1_slices = concatenate_spectra(spec_band)\nsource1_slices.plot(color='r', lw=2, marker='.')\n\nspec_band = []\nfor band, inds in zip(bands_TiO[:first_n_bands], target_slices.wavelength_splits):\n target_slice = slice_spectrum(source2, band.min-width*u.Angstrom, band.max+width*u.Angstrom, \n force_length=abs(np.diff(inds))[0])\n target_slice.flux /= target_slice.flux.max()\n spec_band.append(target_slice)\n\nsource2_slices = concatenate_spectra(spec_band)\nsource2_slices.plot(color='b', lw=2, marker='.')", "_____no_output_____" ], [ "def plot_spliced_spectrum(observed_spectrum, model_flux, other_model=None):\n n_chunks = len(observed_spectrum.wavelength_splits)\n fig, ax = plt.subplots(n_chunks, 1, figsize=(8, 10))\n\n for i, inds in enumerate(observed_spectrum.wavelength_splits):\n min_ind, max_ind = inds\n \n ax[i].errorbar(observed_spectrum.wavelength[min_ind:max_ind].value, \n observed_spectrum.flux[min_ind:max_ind], \n 0.025*np.ones(max_ind-min_ind))\n ax[i].plot(observed_spectrum.wavelength[min_ind:max_ind], \n model_flux[min_ind:max_ind])\n \n if other_model is not None:\n ax[i].plot(observed_spectrum.wavelength[min_ind:max_ind], \n other_model[min_ind:max_ind], alpha=0.4)\n \n 
ax[i].set_xlim([observed_spectrum.wavelength[min_ind].value,\n observed_spectrum.wavelength[max_ind-1].value])\n ax[i].set_ylim([0.9*observed_spectrum.flux[min_ind:max_ind].min(), \n 1.1])\n\n return fig, ax\n\nplot_spliced_spectrum(target_slices, source1_slices.flux, source2_slices.flux)", "_____no_output_____" ], [ "model, resid = instr_model(target_slices, source1_slices, source2_slices, np.log(0.5), 1, 1, 0, 0, 0, 0, 0)", "_____no_output_____" ], [ "plt.plot(target_slices.flux - model)", "_____no_output_____" ], [ "# from scipy.optimize import fmin_l_bfgs_b\n\n# def chi2(p, target, temp_phot, temp_spot):\n# spotted_area, lam_offset0, lam_offset1, lam_offset2, res = p\n# lam_offsets = [lam_offset0, lam_offset1, lam_offset1]\n# model, residuals = instr_model(target, temp_phot, temp_spot, spotted_area, \n# res, *lam_offsets)\n# return residuals\n\n# bounds = [[-30, 0], [-2, 2], [-2, 2], [-2, 2], [1, 15]]\n# initp = [np.log(0.03), 0.0, 0.0, 0.0, 1]\n\n# bfgs_options_fast = dict(epsilon=1e-3, approx_grad=True,\n# m=10, maxls=20)\n# bfgs_options_precise = dict(epsilon=1e-3, approx_grad=True,\n# m=30, maxls=50)\n\n# result = fmin_l_bfgs_b(chi2, initp, bounds=bounds, \n# args=(target_slices, source1_slices, source2_slices),\n# **bfgs_options_precise)\n# #**bfgs_options_fast)", "_____no_output_____" ], [ "# model, resid = instr_model(target_slices, source1_slices, source2_slices, *result[0])", "_____no_output_____" ], [ "# plot_spliced_spectrum(target_slices, model)", "_____no_output_____" ], [ "import emcee\n\nyerr = 0.01\n\ndef random_in_range(min, max):\n return (max-min)*np.random.rand(1)[0] + min\n\ndef lnprior(theta):\n log_spotted_area, res = theta[:2]\n dlambdas = theta[2:]\n if (-15 < log_spotted_area <= 0 and 0. <= res < 3 and all([-3 < dlambda < 3 for dlambda in dlambdas])):\n return 0.0\n return -np.inf\n\ndef lnlike(theta, target, source1, source2):\n log_spotted_area, res = theta[:2]\n dlambdas = theta[2:]\n model, residuals = instr_model(target, source1, source2, np.exp(log_spotted_area), \n res, *dlambdas)\n return -0.5*residuals/yerr**2\n\ndef lnprob(theta, target, source1, source2):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, target, source1, source2)\n\nfrom emcee import EnsembleSampler\n\ndlam_init = -0.2\n# initp = np.array([np.log(0.01), 1, dlam_init, dlam_init, dlam_init, dlam_init, dlam_init])\nndim, nwalkers = 6, 30\n\npos = []\n\ncounter = -1\nwhile len(pos) < nwalkers:\n realization = [random_in_range(-10, -8), random_in_range(0, 1),\n random_in_range(dlam_init-0.1, dlam_init+0.1), random_in_range(dlam_init-0.1, dlam_init+0.1), \n random_in_range(dlam_init-0.1, dlam_init+0.1), random_in_range(dlam_init-0.1, dlam_init+0.1)]\n if np.isfinite(lnprior(realization)):\n pos.append(realization)\n\nsampler = EnsembleSampler(nwalkers, ndim, lnprob, threads=8, \n args=(target_slices, source1_slices, source2_slices))", "_____no_output_____" ], [ "sampler.run_mcmc(pos, 4000);", "_____no_output_____" ], [ "from corner import corner\n\nsamples = sampler.chain[:, 1500:, :].reshape((-1, ndim))\n\ncorner(samples, labels=['$\\log f_s$', '$R$', '$\\Delta \\lambda_0$', '$\\Delta \\lambda_1$', \n '$\\Delta \\lambda_2$', '$\\Delta \\lambda_3$']);#, '$\\Delta \\lambda_4$']);", "_____no_output_____" ], [ "best_params = sampler.flatchain[np.argmax(sampler.flatlnprobability, axis=0), :]\nbest_model = instr_model(target_slices, source1_slices, source2_slices, \n *best_params)[0]", "_____no_output_____" ], [ "best_params", "_____no_output_____" ], 
[ "# maximum spotted area \nnp.exp(np.percentile(samples[:, 0], 98))", "_____no_output_____" ], [ "n_chunks = len(target_slices.wavelength_splits)\nfig, ax = plt.subplots(n_chunks, 1, figsize=(8, 10))\n\nfrom copy import deepcopy\nfrom toolkit.analysis import gaussian_kernel\n\nfor i, inds in enumerate(target_slices.wavelength_splits):\n min_ind, max_ind = inds\n\n ax[i].errorbar(target_slices.wavelength[min_ind:max_ind].value, \n target_slices.flux[min_ind:max_ind], \n yerr*np.ones_like(target_slices.flux[min_ind:max_ind]), \n fmt='o', color='k')\n #0.025*np.ones(max_ind-min_ind), fmt='.')\n ax[i].plot(target_slices.wavelength[min_ind:max_ind], \n best_model[min_ind:max_ind], color='r')\n\n ax[i].set_xlim([target_slices.wavelength[min_ind].value, \n target_slices.wavelength[max_ind-1].value])\n #ax[i].set_ylim([0.9*target_slices.flux[min_ind:max_ind].min(), \n # 1.1])\n\nn_random_draws = 100\n# draw models from posteriors\nfor j in range(n_random_draws):\n step = np.random.randint(0, samples.shape[0])\n random_step = samples[step, :]\n\n rand_model = instr_model(target_slices, source1_slices, source2_slices, *random_step)[0]\n\n for i, inds in enumerate(target_slices.wavelength_splits):\n min_ind, max_ind = inds\n ax[i].plot(target_slices.wavelength[min_ind:max_ind], \n rand_model[min_ind:max_ind], color='#389df7', alpha=0.1)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05b414db5f26cc67f8f113a4bb6de6af0677bb3
83,379
ipynb
Jupyter Notebook
examples/rec_books.ipynb
bizzyvinci/wikirec
cd36125acb4a508d1d138736e8f92f0ee3f8daa2
[ "BSD-3-Clause" ]
null
null
null
examples/rec_books.ipynb
bizzyvinci/wikirec
cd36125acb4a508d1d138736e8f92f0ee3f8daa2
[ "BSD-3-Clause" ]
null
null
null
examples/rec_books.ipynb
bizzyvinci/wikirec
cd36125acb4a508d1d138736e8f92f0ee3f8daa2
[ "BSD-3-Clause" ]
null
null
null
72.503478
51,556
0.801725
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Download-and-Clean-Data\" data-toc-modified-id=\"Download-and-Clean-Data-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Download and Clean Data</a></span></li><li><span><a href=\"#Making-Recommendations\" data-toc-modified-id=\"Making-Recommendations-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Making Recommendations</a></span><ul class=\"toc-item\"><li><span><a href=\"#BERT\" data-toc-modified-id=\"BERT-2.1\"><span class=\"toc-item-num\">2.1&nbsp;&nbsp;</span>BERT</a></span></li><li><span><a href=\"#Doc2vec\" data-toc-modified-id=\"Doc2vec-2.2\"><span class=\"toc-item-num\">2.2&nbsp;&nbsp;</span>Doc2vec</a></span></li><li><span><a href=\"#LDA\" data-toc-modified-id=\"LDA-2.3\"><span class=\"toc-item-num\">2.3&nbsp;&nbsp;</span>LDA</a></span></li><li><span><a href=\"#TFIDF\" data-toc-modified-id=\"TFIDF-2.4\"><span class=\"toc-item-num\">2.4&nbsp;&nbsp;</span>TFIDF</a></span></li></ul></li></ul></div>", "_____no_output_____" ], [ "**rec_books**\n\nDownloads an English Wikipedia dump and parses it for all available books. All available models are then ran to compare recommendation efficacy.\n\nIf using this notebook in [Google Colab](https://colab.research.google.com/github/andrewtavis/wikirec/blob/main/examples/rec_books.ipynb), you can activate GPUs by following `Edit > Notebook settings > Hardware accelerator` and selecting `GPU`.", "_____no_output_____" ] ], [ [ "# pip install wikirec -U", "_____no_output_____" ] ], [ [ "The following gensim update might be necessary in Google Colab as the default version is very low.", "_____no_output_____" ] ], [ [ "# pip install gensim -U", "_____no_output_____" ] ], [ [ "In Colab you'll also need to download nltk's names data.", "_____no_output_____" ] ], [ [ "# import nltk\n# nltk.download(\"names\")", "_____no_output_____" ], [ "import os\nimport json\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set(style=\"darkgrid\")\nsns.set(rc={\"figure.figsize\": (15, 5)})\n\nfrom wikirec import data_utils, model, utils\n\nfrom IPython.core.display import display, HTML\n\ndisplay(HTML(\"<style>.container { width:99% !important; }</style>\"))", "_____no_output_____" ] ], [ [ "# Download and Clean Data", "_____no_output_____" ] ], [ [ "files = data_utils.download_wiki(\n language=\"en\", target_dir=\"./enwiki_dump\", file_limit=-1, dump_id=False\n)\nlen(files)", "Files already available in the ./enwiki_dump directory.\n" ], [ "topic = \"books\"", "_____no_output_____" ], [ "data_utils.parse_to_ndjson(\n topics=topic,\n output_path=\"./enwiki_books.ndjson\",\n input_dir=\"./enwiki_dump\",\n partitions_dir=\"./enwiki_book_partitions\",\n limit=None,\n delete_parsed_files=True,\n multicore=True,\n verbose=True,\n)", "File ./enwiki_books.ndjson with articles for the given topics already exists\n" ], [ "with open(\"./enwiki_books.ndjson\", \"r\") as fin:\n books = [json.loads(l) for l in fin]\n\nprint(f\"Found a total of {len(books)} books.\")", "Found a total of 41234 books.\n" ], [ "titles = [m[0] for m in books]\ntexts = [m[1] for m in books]", "_____no_output_____" ], [ "if os.path.isfile(\"./book_corpus_idxs.pkl\"):\n print(f\"Loading book corpus and selected indexes\")\n with open(f\"./book_corpus_idxs.pkl\", \"rb\") as f:\n text_corpus, selected_idxs = pickle.load(f)\n selected_titles = [titles[i] for i in selected_idxs]\n\nelse:\n print(f\"Creating book corpus and selected 
indexes\")\n text_corpus, selected_idxs = data_utils.clean(\n texts=texts,\n language=\"en\",\n min_token_freq=5, # 0 for Bert\n min_token_len=3, # 0 for Bert\n min_tokens=50,\n max_token_index=-1,\n min_ngram_count=3,\n remove_stopwords=True, # False for Bert\n ignore_words=None,\n remove_names=True,\n sample_size=1,\n verbose=True,\n )\n\n selected_titles = [titles[i] for i in selected_idxs]\n\n with open(\"./book_corpus_idxs.pkl\", \"wb\") as f:\n print(\"Pickling book corpus and selected indexes\")\n pickle.dump([text_corpus, selected_idxs], f, protocol=4)", "Loading book corpus and selected indexes\n" ] ], [ [ "# Making Recommendations", "_____no_output_____" ] ], [ [ "single_input_0 = \"Harry Potter and the Philosopher's Stone\"\nsingle_input_1 = \"The Hobbit\"\nmultiple_inputs = [\"Harry Potter and the Philosopher's Stone\", \"The Hobbit\"]", "_____no_output_____" ], [ "def load_or_create_sim_matrix(\n method,\n corpus,\n metric,\n topic,\n path=\"./\",\n bert_st_model=\"xlm-r-bert-base-nli-stsb-mean-tokens\",\n **kwargs,\n):\n \"\"\"\n Loads or creats a similarity matrix to deliver recommendations\n \n NOTE: the .pkl files made are 5-10GB or more in size\n \"\"\"\n if os.path.isfile(f\"{path}{topic}_{metric}_{method}_sim_matrix.pkl\"):\n print(f\"Loading {method} {topic} {metric} similarity matrix\")\n with open(f\"{path}{topic}_{metric}_{method}_sim_matrix.pkl\", \"rb\") as f:\n sim_matrix = pickle.load(f)\n\n else:\n print(f\"Creating {method} {topic} {metric} similarity matrix\")\n embeddings = model.gen_embeddings(\n method=method, corpus=corpus, bert_st_model=bert_st_model, **kwargs,\n )\n sim_matrix = model.gen_sim_matrix(\n method=method, metric=metric, embeddings=embeddings,\n )\n\n with open(f\"{path}{topic}_{metric}_{method}_sim_matrix.pkl\", \"wb\") as f:\n print(f\"Pickling {method} {topic} {metric} similarity matrix\")\n pickle.dump(sim_matrix, f, protocol=4)\n\n return sim_matrix", "_____no_output_____" ] ], [ [ "## BERT", "_____no_output_____" ] ], [ [ "# Remove n-grams for BERT training\ncorpus_no_ngrams = [\n \" \".join([t for t in text.split(\" \") if \"_\" not in t]) for text in text_corpus\n]", "_____no_output_____" ], [ "# We can pass kwargs for sentence_transformers.SentenceTransformer.encode\nbert_sim_matrix = load_or_create_sim_matrix(\n method=\"bert\",\n corpus=corpus_no_ngrams,\n metric=\"cosine\", # euclidean\n topic=topic,\n path=\"./\",\n bert_st_model=\"xlm-r-bert-base-nli-stsb-mean-tokens\",\n show_progress_bar=True,\n batch_size=32,\n)", "Loading bert books cosine similarity matrix\n" ], [ "model.recommend(\n inputs=single_input_0,\n titles=selected_titles,\n sim_matrix=bert_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=single_input_1,\n titles=selected_titles,\n sim_matrix=bert_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=multiple_inputs,\n titles=selected_titles,\n sim_matrix=bert_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ] ], [ [ "## Doc2vec", "_____no_output_____" ] ], [ [ "# We can pass kwargs for gensim.models.doc2vec.Doc2Vec\ndoc2vec_sim_matrix = load_or_create_sim_matrix(\n method=\"doc2vec\",\n corpus=text_corpus,\n metric=\"cosine\", # euclidean\n topic=topic,\n path=\"./\",\n vector_size=100,\n epochs=10,\n alpha=0.025,\n)", "Loading doc2vec books cosine similarity matrix\n" ], [ "model.recommend(\n inputs=single_input_0,\n titles=selected_titles,\n sim_matrix=doc2vec_sim_matrix,\n n=10,\n 
metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=single_input_1,\n titles=selected_titles,\n sim_matrix=doc2vec_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=multiple_inputs,\n titles=selected_titles,\n sim_matrix=doc2vec_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ] ], [ [ "## LDA", "_____no_output_____" ] ], [ [ "topic_nums_to_compare = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\n# We can pass kwargs for gensim.models.ldamulticore.LdaMulticore\nutils.graph_lda_topic_evals(\n corpus=text_corpus,\n num_topic_words=10,\n topic_nums_to_compare=topic_nums_to_compare,\n metrics=True,\n verbose=True,\n)\n\nplt.show()", "_____no_output_____" ], [ "# We can pass kwargs for gensim.models.ldamulticore.LdaMulticore\nlda_sim_matrix = load_or_create_sim_matrix(\n method=\"lda\",\n corpus=text_corpus,\n metric=\"cosine\", # euclidean not an option at this time\n topic=topic,\n path=\"./\",\n num_topics=90,\n passes=10,\n decay=0.5,\n)", "Loading lda books cosine similarity matrix\n" ], [ "model.recommend(\n inputs=single_input_0,\n titles=selected_titles,\n sim_matrix=lda_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=single_input_1,\n titles=selected_titles,\n sim_matrix=lda_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=multiple_inputs,\n titles=selected_titles,\n sim_matrix=lda_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ] ], [ [ "## TFIDF", "_____no_output_____" ] ], [ [ "# We can pass kwargs for sklearn.feature_extraction.text.TfidfVectorizer\ntfidf_sim_matrix = load_or_create_sim_matrix(\n method=\"tfidf\",\n corpus=text_corpus,\n metric=\"cosine\", # euclidean\n topic=topic,\n path=\"./\",\n max_features=None,\n norm='l2',\n)", "Loading tfidf books cosine similarity matrix\n" ], [ "model.recommend(\n inputs=single_input_0,\n titles=selected_titles,\n sim_matrix=tfidf_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=single_input_1,\n titles=selected_titles,\n sim_matrix=tfidf_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ], [ "model.recommend(\n inputs=multiple_inputs,\n titles=selected_titles,\n sim_matrix=tfidf_sim_matrix,\n n=10,\n metric=\"cosine\",\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d05b5204c2fb38039b84e0e0a61546b37c9dc793
239,039
ipynb
Jupyter Notebook
3_Reports/12.03_22_17/Report_03_22_17.ipynb
NeuroStat/IBMAvsGLM
6f77598e69ed01b2e970c88e226a774d4b94c603
[ "MIT" ]
null
null
null
3_Reports/12.03_22_17/Report_03_22_17.ipynb
NeuroStat/IBMAvsGLM
6f77598e69ed01b2e970c88e226a774d4b94c603
[ "MIT" ]
null
null
null
3_Reports/12.03_22_17/Report_03_22_17.ipynb
NeuroStat/IBMAvsGLM
6f77598e69ed01b2e970c88e226a774d4b94c603
[ "MIT" ]
null
null
null
588.76601
83,858
0.928518
[ [ [ "# CI coverage, length and bias\nFor event related design. ", "_____no_output_____" ] ], [ [ "# Directories of the data for different scenario's\nDATAwd <- list(\n 'Take[8mmBox10]' = \"/Volumes/2_TB_WD_Elements_10B8_Han/PhD/IBMAvsGLM/Results/Cambridge/ThirdLevel/8mm/boxcar10\",\n 'Take[8mmEvent2]' = \"/Volumes/2_TB_WD_Elements_10B8_Han/PhD/IBMAvsGLM/Results/Cambridge/ThirdLevel/8mm/event2\"\n\t)\nNUMDATAwd <- length(DATAwd)\ncurrentWD <- 2\n\n# Number of conficence intervals\nCIs <- c('MA-weightVar','GLM-t')\nNumCI <- length(CIs)\n\n# Number of executed runs\nnruns.tmp <- matrix(c(\n 1,2500,\n 2,500\n ), ncol=2, byrow=TRUE)\nnruns <- nruns.tmp[currentWD,2]\n\n\n# Number of subjects and studies\nnsub <- 20\nnstud <- 5\n\n# Dimension of brain\nDIM <- c(91,109,91)\n\n# True value\ntrueVal <- 0\n\n# Load in libraries\nlibrary(oro.nifti)\nlibrary(dplyr)\nlibrary(lattice)\nlibrary(grDevices)\nlibrary(ggplot2)\nlibrary(data.table)\nlibrary(gridExtra)\n\n# Function to count the number of instances in which true value is between lower and upper CI.\nindicator <- function(UPPER, LOWER, trueval){\n IND <- trueval >= LOWER & trueval <= UPPER\n IND[is.na(IND)] <- 0\n return(IND)\n}\n\n# Funtion to count the number of recorded values\ncounting <- function(UPPER, LOWER){\n count <- (!is.na(UPPER) & !is.na(LOWER))\n return(count)\n}\n\n##\n###############\n### Data Wrangling\n###############\n##\n\n######################################################\n# First we create a universal mask over all iterations\n######################################################\n\n# Set warnings off\n# options(warn = -1)\n\n# Vector to check progress\nCheckProgr <- floor(seq(1,nruns,length.out=10))\n\n# Vector of simulations where we have a missing mask\nmissingMask <- c()\n\n# Do you want to make an universal mask again?\nWRITEMASK <- FALSE\nif(isTRUE(WRITEMASK)){\n # Vector with all masks in it\n AllMask <- c()\n\n # Load in the masks\n for(i in 1:nruns){\n # Print progress\n if(i %in% CheckProgr) print(paste('LOADING MASKS. 
NOW AT ', (i/nruns)*100, '%', sep = ''))\n\n # Try reading in mask, then go to one column and convert to data frame.\n CheckMask <- try(readNIfTI(paste(DATAwd[[currentWD]], '/', i,'/mask.nii', sep = ''))[,,,1] %>%\n matrix(.,ncol = 1) %>% data.frame(), silent = TRUE)\n # If there is no mask, skip iteration\n if(class(CheckMask) == \"try-error\"){ missingMask <- c(missingMask, i); next}\n\n # Some masks are broken: if all values are zero: REPORT\n if(all(CheckMask == 0)){print(paste(\"CHECK MASK AT ITERATION \", i, sep = \"\")); next}\n\n # Bind the masks of all iterations together\n AllMask <- bind_cols(AllMask, CheckMask)\n rm(CheckMask)\n }\n\n # Take product to have universal mask\n UnivMask <- apply(AllMask, 1, prod)\n\n # Better write this to folder\n niftiimage <- nifti(img=array(UnivMask, dim = DIM),dim=DIM)\n writeNIfTI(niftiimage,filename=paste(DATAwd[[currentWD]],'/universalMask',sep=''),gzipped=FALSE)\n}\nif(isTRUE(!WRITEMASK)){\n # Read in mask\n UnivMask <- readNIfTI(paste(DATAwd[[currentWD]],'/universalMask.nii', sep = ''))[,,] %>%\n matrix(.,ncol = 1)\n}\n", "_____no_output_____" ], [ "# Load the naming structure of the data\nload(paste(paste(DATAwd[['Take[8mmBox10]']], '/1/ObjectsRestMAvsGLM_1.RData',sep=''))); objects <- names(ObjectsRestMAvsGLM); rm(ObjectsRestMAvsGLM)\nOBJ.ID <- c(rep(objects[!objects %in% c(\"STHEDGE\",\"STWEIGHTS\")], each=prod(DIM)), rep(c(\"STHEDGE\",\"STWEIGHTS\"), each=c(prod(DIM)*nstud)))\n\nobjects.CI <- objects[grepl(c('upper'), objects) | grepl(c('lower'), objects)]\n", "_____no_output_____" ], [ "# Pre-define the CI coverage and length vectors in which we sum the values\n# After running nruns, divide by amount of obtained runs.\n# For bias, we work with VAR(X) = E(X**2) - E(X)**2 and a vector in which we sum the bias.\n# Hence, we need to sum X**2 and X in a separate vector.\nsummed.coverage.IBMA <- summed.coverage.GLM <-\nsummed.length.IBMA <- summed.length.GLM <-\nsummed.X.IBMA <- summed.X.GLM <-\nsummed.X2.IBMA <- summed.X2.GLM <-\n array(0,dim=c(sum(UnivMask == 1),1))\n\n# Keeping count of amount of values\ncounterMA <- counterGLM <- 0\n\n# Load in the data\nt1 <- Sys.time()\nfor(i in 1:nruns){\n if(i %in% CheckProgr) print(paste('PROCESSING. 
NOW AT ', (i/nruns)*100, '%', sep = ''))\n\n # CI coverage: loop over the two procedures\n for(p in 1:2){\n objUP <- objects.CI[grepl(c('upper'), objects.CI)][p] %>% gsub(\".\", \"_\",.,fixed = TRUE)\n objLOW <- objects.CI[grepl(c('lower'), objects.CI)][p] %>% gsub(\".\", \"_\",.,fixed = TRUE)\n\n UP <- try(fread(file = paste(DATAwd[[currentWD]], '/', i, '/', objUP, '.txt', sep = ''), header = FALSE) %>% filter(., UnivMask == 1), silent = TRUE)\n if(class(UP) == \"try-error\"){print(paste('Missing data in iteration ', i, sep = '')); next}\n LOW <- fread(file = paste(DATAwd[[currentWD]], '/',i, '/', objLOW, '.txt', sep = ''), header = FALSE) %>% filter(., UnivMask == 1)\n if(grepl('MA', x = objUP)){\n # CI coverage: add when true value in CI\n summed.coverage.IBMA[,1] <- summed.coverage.IBMA[,1] +\n indicator(UPPER = UP, LOWER = LOW, trueval = 0)\n # CI length: sum the length\n summed.length.IBMA[,1] <- summed.length.IBMA[,1] + as.matrix(UP - LOW)\n # Add one to the count (if data is available)\n counterMA <- counterMA + counting(UPPER = UP, LOWER = LOW)\n }else{\n # GLM procedure: CI coverage\n summed.coverage.GLM[,1] <- summed.coverage.GLM[,1] +\n indicator(UPPER = UP, LOWER = LOW, trueval = 0)\n # CI length: sum the length\n summed.length.GLM[,1] <- summed.length.GLM[,1] + as.matrix(UP - LOW)\n # Count\n counterGLM <- counterGLM + counting(UPPER = UP, LOWER = LOW)\n }\n rm(objUP, objLOW, UP, LOW)\n }\n\n # Standardized bias: read in weighted average / cope\n WAVG <- fread(file = paste(DATAwd[[currentWD]], '/', i, '/MA_WeightedAvg.txt', sep = ''), header = FALSE) %>% filter(., UnivMask == 1)\n GLMCOPE <- fread(file = paste(DATAwd[[currentWD]], '/', i, '/GLM_COPE', '.txt', sep = ''), header = FALSE) %>% filter(., UnivMask == 1)\n # Sum X\n summed.X.IBMA[,1] <- summed.X.IBMA[,1] + as.matrix(WAVG)\n summed.X.GLM[,1] <- summed.X.GLM[,1] + as.matrix(GLMCOPE)\n # Sum X**2\n summed.X2.IBMA[,1] <- summed.X2.IBMA[,1] + as.matrix(WAVG ** 2)\n summed.X2.GLM[,1] <- summed.X2.GLM[,1] + as.matrix(GLMCOPE ** 2)\n\n}\nSys.time() - t1\n", "[1] \"PROCESSING. NOW AT 0.2%\"\n[1] \"PROCESSING. NOW AT 11.2%\"\n[1] \"PROCESSING. NOW AT 22.2%\"\n[1] \"PROCESSING. NOW AT 33.4%\"\n[1] \"PROCESSING. NOW AT 44.4%\"\n[1] \"PROCESSING. NOW AT 55.6%\"\n[1] \"PROCESSING. NOW AT 66.6%\"\n[1] \"PROCESSING. NOW AT 77.8%\"\n[1] \"PROCESSING. NOW AT 88.8%\"\n[1] \"PROCESSING. 
NOW AT 100%\"\n" ], [ "# Calculate the average (over nsim) CI coverage, length and bias\nCoverage.IBMA <- summed.coverage.IBMA/counterMA\nCoverage.GLM <- summed.coverage.GLM/counterGLM\n\nLength.IBMA <- summed.length.IBMA/counterMA\nLength.GLM <- summed.length.GLM/counterGLM\n\n# Formula: Var(X) = E(X**2) - [E(X)]**2\n # E(X**2) = sum(X**2) / n\n # E(X) = sum(X) / n\n # \\hat{var(X)} = var(X) * (N / N-1)\n # \\hat{SD} = sqrt(\\hat{var(X)})\nsamplingSD.IBMA <- sqrt(((summed.X2.IBMA/(counterMA)) - ((summed.X.IBMA/counterMA)**2)) * (counterMA / (counterMA - 1)))\nsamplingSD.GLM <- sqrt(((summed.X2.GLM/(counterGLM)) - ((summed.X.GLM/counterGLM)**2)) * (counterGLM / (counterGLM - 1)))\n\n# Standardized bias: true beta = 0\nBias.IBMA <- ((summed.X.IBMA / counterMA) - 0) / samplingSD.IBMA\nBias.GLM <- ((summed.X.GLM / counterGLM) - 0) / samplingSD.GLM", "_____no_output_____" ], [ "# Heatmap of the coverages\nemptBrainIBMA <- emptBrainGLM <- array(NA, dim = prod(DIM))\nemptBrainIBMA[UnivMask == 1] <- c(summed.coverage.IBMA/counterMA)\nemptBrainGLM[UnivMask == 1] <- c(summed.coverage.GLM/counterGLM)\n\nLevelPlotMACoV <- levelplot(array(emptBrainIBMA, dim = DIM)[,,40], col.regions = topo.colors,\n \txlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'CI coverage meta-analysis')\nLevelPlotGLMCoV <- levelplot(array(emptBrainGLM, dim = DIM)[,,40], col.regions = topo.colors,\n xlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'CI coverage GLM')\n\n# Bias\nemptBrainIBMA <- emptBrainGLM <- array(NA, dim = prod(DIM))\nemptBrainIBMA[UnivMask == 1] <- Bias.IBMA\nemptBrainGLM[UnivMask == 1] <- Bias.GLM\nLevelPlotMABias <- levelplot(array(emptBrainIBMA, dim = DIM)[,,40], col.regions = topo.colors,\n \txlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'Standardized bias Meta-Analysis')\nLevelPlotGLMBias <- levelplot(array(emptBrainGLM, dim = DIM)[,,40], col.regions = topo.colors,\n xlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'Standardized bias GLM')\nDifferenceBias <- levelplot(array(emptBrainIBMA - emptBrainGLM, dim = DIM)[,,c(36:46)], col.regions = topo.colors,\n \txlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'Bias MA - GLM')\n\n# CI length\nemptBrainIBMA <- emptBrainGLM <- array(NA, dim = prod(DIM))\nemptBrainIBMA[UnivMask == 1] <- Length.IBMA\nemptBrainGLM[UnivMask == 1] <- Length.GLM\nLevelPlotMACL <- levelplot(array(emptBrainIBMA, dim = DIM)[,,40], col.regions = topo.colors,\n \txlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'CI length Meta-Analysis')\nLevelPlotGLMCL <- levelplot(array(emptBrainGLM, dim = DIM)[,,40], col.regions = topo.colors,\n xlim=c(0,DIM[1]),ylim=c(0,DIM[2]), xlab = 'x', ylab = 'y',\n main = 'CI length GLM')", "_____no_output_____" ], [ "grid.arrange(LevelPlotMACoV,LevelPlotGLMCoV, ncol = 2)", "_____no_output_____" ], [ "grid.arrange(LevelPlotMABias,LevelPlotGLMBias, ncol = 2)", "_____no_output_____" ], [ "grid.arrange(LevelPlotMACL,LevelPlotGLMCL, ncol = 2)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d05b60ef1e26eb35bc1cf7f58e29a414d7dbf0f4
52,649
ipynb
Jupyter Notebook
ML_Assignment 02_Diabetes Prediction/Diabetes_prediction.ipynb
parth111999/Data-Science-Assignment
ce1fb8414cbe71baa3852c827b4745c0bccfeb44
[ "Apache-2.0" ]
null
null
null
ML_Assignment 02_Diabetes Prediction/Diabetes_prediction.ipynb
parth111999/Data-Science-Assignment
ce1fb8414cbe71baa3852c827b4745c0bccfeb44
[ "Apache-2.0" ]
null
null
null
ML_Assignment 02_Diabetes Prediction/Diabetes_prediction.ipynb
parth111999/Data-Science-Assignment
ce1fb8414cbe71baa3852c827b4745c0bccfeb44
[ "Apache-2.0" ]
null
null
null
38.151449
509
0.33881
[ [ [ "<img src=\"http://cfs22.simplicdn.net/ice9/new_logo.svgz \"/>\n\n# Assignment 02: Evaluate the Diabetes Dataset\n\n*The comments/sections provided are your cues to perform the assignment. You don't need to limit yourself to the number of rows/cells provided. You can add additional rows in each section to add more lines of code.*\n\n*If at any point in time you need help on solving this assignment, view our demo video to understand the different steps of the code.*\n\n**Happy coding!**\n\n* * *", "_____no_output_____" ], [ "#### 1: Import the dataset", "_____no_output_____" ] ], [ [ "#Import the required libraries\nimport numpy as np\nimport pandas as pd ", "_____no_output_____" ], [ "#Import the diabetes dataset\ndata = pd.read_csv(\"pima-indians-diabetes.data\",header=None)", "_____no_output_____" ] ], [ [ "#### 2: Analyze the dataset", "_____no_output_____" ] ], [ [ "#View the first five observations of the dataset\ndata.head()", "_____no_output_____" ] ], [ [ "#### 3: Find the features of the dataset", "_____no_output_____" ] ], [ [ "#Use the .NAMES file to view and set the features of the dataset\nfeature_name = np.array([\"Pregnant\",\"Glucose\",\"BP\",\"Skin\",\"Insulin\",\"BMI\",\"Pedigree\",\"Age\",\"label\"])\ndf_data = pd.read_csv(\"pima-indians-diabetes.data\",names=feature_name)\ndf_data", "_____no_output_____" ], [ "#View the number of observations and features of the dataset\ndf_data.shape", "_____no_output_____" ] ], [ [ "#### 4: Find the response of the dataset", "_____no_output_____" ] ], [ [ "#Create the feature object\nX_feature = df_data[[\"Pregnant\",\"Glucose\",\"BP\",\"Skin\",\"Insulin\",\"BMI\",\"Pedigree\",\"Age\"]]\nX_feature", "_____no_output_____" ], [ "#Create the reponse object\ny_target = df_data[[\"label\"]]\ny_target", "_____no_output_____" ], [ "#View the shape of the feature object\nX_feature.shape", "_____no_output_____" ], [ "#View the shape of the target object\ny_target.shape", "_____no_output_____" ] ], [ [ "#### 5: Use training and testing datasets to train the model", "_____no_output_____" ] ], [ [ "#Split the dataset to test and train the model\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X_feature,y_target,test_size = 0.25,random_state = 20)", "_____no_output_____" ] ], [ [ "#### 6: Create a model to predict the diabetes outcome", "_____no_output_____" ] ], [ [ "# Create a logistic regression model using the training set\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression()\nlogreg.fit(X_train,y_train)", "/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py:1679: FutureWarning: Feature names only support names that are all strings. Got feature names with dtypes: ['str_']. An error will be raised in 1.2.\n FutureWarning,\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py:985: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,\n" ], [ "#Make predictions using the testing set\nPrediction = logreg.predict(X_test)\nprint(Prediction[10:20])\nprint(y_test[10:20])", "[1 1 0 0 1 0 0 0 0 1]\n label\n702 1\n222 0\n20 0\n631 0\n147 0\n403 0\n526 0\n422 0\n150 0\n7 0\n" ] ], [ [ "#### 7: Check the accuracy of the model", "_____no_output_____" ] ], [ [ "#Evaluate the accuracy of your model\nfrom sklearn import metrics\nperformance = metrics.accuracy_score(y_test,Prediction)\nperformance", "_____no_output_____" ], [ "#Print the first 30 actual and predicted responses\nprint(f\"Predicted Value - {Prediction[0:30]}\")\nprint(f\"Actual Value - {y_test.values[0:30]}\")", "Predicted Value - [0 1 0 0 0 0 0 0 1 0 1 1 0 0 1 0 0 0 0 1 0 0 0 0 1 0 1 0 1 0]\nActual Value - [[1]\n [1]\n [0]\n [0]\n [0]\n [1]\n [1]\n [0]\n [0]\n [0]\n [1]\n [0]\n [0]\n [0]\n [0]\n [0]\n [0]\n [0]\n [0]\n [0]\n [0]\n [1]\n [0]\n [0]\n [1]\n [0]\n [0]\n [0]\n [1]\n [1]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d05b69e20ef2ec0cb235fa243bce51eb67d33d25
616,337
ipynb
Jupyter Notebook
pytorch_notebooks-master/mixtures_density_network_relu_version.ipynb
boyali/pytorch-mixture_of_density_networks
87f80dae19b421aa127062eacbabe8fc97cfd772
[ "MIT" ]
362
2017-07-11T07:33:32.000Z
2022-03-20T01:28:17.000Z
mixtures_density_network_relu_version.ipynb
jasonzdeng/pytorch_notebooks
83a0a20318eb2872537593a387069f3c74a186e8
[ "MIT" ]
5
2018-02-24T09:42:37.000Z
2020-10-09T05:56:26.000Z
mixtures_density_network_relu_version.ipynb
jasonzdeng/pytorch_notebooks
83a0a20318eb2872537593a387069f3c74a186e8
[ "MIT" ]
69
2017-07-11T08:05:14.000Z
2022-03-03T07:11:34.000Z
731.123369
151,092
0.939856
[ [ [ "## Mixture Density Networks with PyTorch ##\n\nRelated posts:\n\nJavaScript [implementation](http://blog.otoro.net/2015/06/14/mixture-density-networks/).\n\nTensorFlow [implementation](http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/).", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport math\nfrom torch.autograd import Variable\nimport torch.nn as nn", "_____no_output_____" ] ], [ [ "### Simple Data Fitting ###", "_____no_output_____" ], [ "Before we talk about MDN's, we try to perform some simple data fitting using PyTorch to make sure everything works. To get started, let's try to quickly build a neural network to fit some fake data. As neural nets of even one hidden layer can be universal function approximators, we can see if we can train a simple neural network to fit a noisy sinusoidal data, like this ( $\\epsilon$ is just standard gaussian random noise):\n\n$y=7.0 \\sin( 0.75 x) + 0.5 x + \\epsilon$", "_____no_output_____" ], [ "After importing the libraries, we generate the sinusoidal data we will train a neural net to fit later:", "_____no_output_____" ] ], [ [ "NSAMPLE = 1000\nx_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\nr_data = np.float32(np.random.normal(size=(NSAMPLE,1)))\ny_data = np.float32(np.sin(0.75*x_data)*7.0+x_data*0.5+r_data*1.0)", "_____no_output_____" ], [ "plt.figure(figsize=(8, 8))\nplot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "We will define this simple neural network one-hidden layer and 100 nodes:\n$Y = W_{out} \\max( W_{in} X + b_{in}, 0) + b_{out}$", "_____no_output_____" ] ], [ [ "# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\n# from (https://github.com/jcjohnson/pytorch-examples)\nN, D_in, H, D_out = NSAMPLE, 1, 100, 1", "_____no_output_____" ], [ "# Create random Tensors to hold inputs and outputs, and wrap them in Variables.\n# since NSAMPLE is not large, we train entire dataset in one minibatch.\nx = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, D_in)))\ny = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, D_out)), requires_grad=False)", "_____no_output_____" ], [ "model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )", "_____no_output_____" ] ], [ [ "We can define a loss function as the sum of square error of the output vs the data (we can add regularisation if we want).", "_____no_output_____" ] ], [ [ "loss_fn = torch.nn.MSELoss()", "_____no_output_____" ] ], [ [ "We will also define a training loop to minimise the loss function later. 
We can use the RMSProp gradient descent optimisation method.", "_____no_output_____" ] ], [ [ "learning_rate = 0.01\noptimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, alpha=0.8)\nfor t in range(100000):\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n if (t % 10000 == 0):\n print(t, loss.data[0])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()", "0 43.28170394897461\n10000 2.383552074432373\n20000 1.9408857822418213\n30000 1.7749266624450684\n40000 1.6699488162994385\n50000 1.5811467170715332\n60000 1.495665431022644\n70000 1.4401085376739502\n80000 1.404916524887085\n90000 1.3641223907470703\n" ], [ "x_test = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\nx_test = Variable(torch.from_numpy(x_test.reshape(NSAMPLE, D_in)))\ny_test = model(x_test)\nplt.figure(figsize=(8, 8))\nplt.plot(x_data,y_data,'ro', x_test.data.numpy(),y_test.data.numpy(),'bo',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "We see that the neural network can fit this sinusoidal data quite well, as expected. However, this type of fitting method only works well when the function we want to approximate with the neural net is a one-to-one, or many-to-one function. Take for example, if we invert the training data:\n$x=7.0 \\sin( 0.75 y) + 0.5 y+ \\epsilon$", "_____no_output_____" ] ], [ [ "temp_data = x_data\nx_data = y_data\ny_data = temp_data\n\nplt.figure(figsize=(8, 8))\nplot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "If we were to use the same method to fit this inverted data, obviously it wouldn't work well, and we would expect to see a neural network trained to fit only to the square mean of the data.", "_____no_output_____" ] ], [ [ "x = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, D_in)))\ny = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, D_out)), requires_grad=False)\nlearning_rate = 0.01\noptimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, alpha=0.8)\nfor t in range(3000):\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n if (t % 300 == 0):\n print(t, loss.data[0])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()", "0 59.484222412109375\n300 23.014665603637695\n600 22.61995506286621\n900 22.459156036376953\n1200 22.369720458984375\n1500 22.29016876220703\n1800 22.220983505249023\n2100 22.154090881347656\n2400 22.101402282714844\n2700 22.068389892578125\n" ], [ "x_test = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\nx_test = Variable(torch.from_numpy(x_test.reshape(NSAMPLE, D_in)))\ny_test = model(x_test)\nplt.figure(figsize=(8, 8))\nplt.plot(x_data,y_data,'ro', x_test.data.numpy(),y_test.data.numpy(),'bo',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "Our current model only predicts one output value for each input, so this approach will fail miserably. What we want is a model that has the capacity to predict a range of different output values for each input. In the next section we implement a Mixture Density Network (MDN) to achieve this task.\n", "_____no_output_____" ], [ "## Mixture Density Networks ##", "_____no_output_____" ], [ "\nOur current model only predicts one output value for each input, so this approach will fail. What we want is a model that has the capacity to predict a range of different output values for each input. In the next section we implement a *Mixture Density Network (MDN)* to do achieve this task.\n\nMixture Density Networks, developed by Christopher Bishop in the 1990s, is an attempt to address this problem. 
Rather to have the network predict a single output value, the MDN predicts an entire *probability distribution* of the output, so we can sample several possible different output values for a given input.\n\nThis concept is quite powerful, and can be employed many current areas of machine learning research. It also allows us to calculate some sort of confidence factor in the predictions that the network is making.\n\nThe inverse sinusoidal data we chose is not just for a toy problem, as there are applications in the field of robotics, for example, where we want to determine which angle we need to move the robot arm to achieve a target location. MDNs are also used to model handwriting, where the next stroke is drawn from a probability distribution of multiple possibilities, rather than sticking to one prediction.\n\nBishop's implementation of MDNs will predict a class of probability distributions called Mixture Gaussian distributions, where the output value is modelled as a sum of many gaussian random values, each with different means and standard deviations. So for each input $x$, we will predict a probability distribution function $P(Y = y | X = x)$ that is approximated by a weighted sum of different gaussian distributions.", "_____no_output_____" ], [ "$P(Y = y | X = x) = \\sum_{k=0}^{K-1} \\Pi_{k}(x) \\phi(y, \\mu_{k}(x), \\sigma_{k}(x)), \\sum_{k=0}^{K-1} \\Pi_{k}(x) = 1$", "_____no_output_____" ], [ "Our network will therefore predict the *parameters* of the pdf, in our case the set of $\\mu$, $\\sigma$, and $\\Pi$ values for each input $x$. Rather than predict $y$ directly, we will need to sample from our distribution to sample $y$. This will allow us to have multiple possible values of $y$ for a given $x$.\n\nEach of the parameters $\\Pi_{k}(x), \\mu_{k}(x), \\sigma_{k}(x)$ of the distribution will be determined by the neural network, as a function of the input $x$. There is a restriction that the sum of $\\Pi_{k}(x)$ add up to one, to ensure that the pdf integrates to 1. In addition, $\\sigma_{k}(x)$ must be strictly positive.", "_____no_output_____" ], [ "In our implementation, we will use a neural network of one hidden later with 100 nodes, and also generate 20 mixtures, hence there will be 60 actual outputs of our neural network of a single input. Our definition will be split into 2 parts:\n\n$Z = W_{out} \\max( W_{in} X + b_{in}, 0) + b_{out}$", "_____no_output_____" ], [ "In the first part, $Z$ is a vector of 60 values that will be then splitup into three equal parts, $[Z_{\\Pi}, Z_{\\sigma}, Z_{\\mu}] = Z$, where each of $Z_{\\Pi}$, $Z_{\\sigma}$, $Z_{\\mu}$ are vectors of length 20.", "_____no_output_____" ], [ "In this PyTorch implementation, unlike the TF version, we will implement this operation with 3 seperate Linear layers, rather than splitting a large $Z$, for clarity:\n\n$Z_{\\Pi} = W_{\\Pi} \\max( W_{in} X + b_{in}, 0) + b_{\\Pi}$\n\n$Z_{\\sigma} = W_{\\sigma} \\max( W_{in} X + b_{in}, 0) + b_{\\sigma}$\n\n$Z_{\\mu} = W_{\\mu} \\max( W_{in} X + b_{in}, 0) + b_{\\mu}$\n\nIn the second part, the parameters of the pdf will be defined as below to satisfy the earlier conditions:\n\n$\\Pi = \\frac{\\exp(Z_{\\Pi})}{\\sum_{i=0}^{20} exp(Z_{\\Pi, i})}, \\\\ \\sigma = \\exp(Z_{\\sigma}), \\\\ \\mu = Z_{\\mu}$", "_____no_output_____" ], [ "$\\Pi_{k}$ are put into a *softmax* operator to ensure that the sum adds to one, and that each mixture probability is positive. 
Each $\\sigma_{k}$ will also be positive due to the exponential operator.\n\nBelow is the PyTorch implementation of the MDN network:", "_____no_output_____" ] ], [ [ "NHIDDEN = 100 # hidden units\nKMIX = 20 # number of mixtures", "_____no_output_____" ], [ "class MDN(nn.Module):\n def __init__(self, hidden_size, num_mixtures):\n super(MDN, self).__init__()\n self.fc_in = nn.Linear(1, hidden_size) \n self.relu = nn.ReLU()\n self.pi_out = torch.nn.Sequential(\n nn.Linear(hidden_size, num_mixtures),\n nn.Softmax()\n )\n self.sigma_out = nn.Linear(hidden_size, num_mixtures)\n self.mu_out = nn.Linear(hidden_size, num_mixtures) \n \n def forward(self, x):\n out = self.fc_in(x)\n out = self.relu(out)\n out_pi = self.pi_out(out)\n out_sigma = torch.exp(self.sigma_out(out))\n out_mu = self.mu_out(out)\n return (out_pi, out_sigma, out_mu)", "_____no_output_____" ] ], [ [ "Let's define the inverted data we want to train our MDN to predict later. As this is a more involved prediction task, I used a higher number of samples compared to the simple data fitting task earlier.", "_____no_output_____" ] ], [ [ "NSAMPLE = 2500\n\ny_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\nr_data = np.float32(np.random.normal(size=(NSAMPLE,1))) # random noise\nx_data = np.float32(np.sin(0.75*y_data)*7.0+y_data*0.5+r_data*1.0)", "_____no_output_____" ], [ "x_train = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, 1)))\ny_train = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, 1)), requires_grad=False)", "_____no_output_____" ], [ "plt.figure(figsize=(8, 8))\nplt.plot(x_train.data.numpy(),y_train.data.numpy(),'ro', alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "We cannot simply use the min square error L2 lost function in this task the output is an entire description of the probability distribution. A more suitable loss function is to minimise the logarithm of the likelihood of the distribution vs the training data:\n\n$CostFunction(y | x) = -\\log[ \\sum_{k}^K \\Pi_{k}(x) \\phi(y, \\mu(x), \\sigma(x)) ]$", "_____no_output_____" ], [ "So for every $(x,y)$ point in the training data set, we can compute a cost function based on the predicted distribution versus the actual points, and then attempt the minimise the sum of all the costs combined. 
To those who are familiar with logistic regression and cross entropy minimisation of softmax, this is a similar approach, but with non-discretised states.\n\nWe have to implement this cost function ourselves:", "_____no_output_____" ] ], [ [ "oneDivSqrtTwoPI = 1.0 / math.sqrt(2.0*math.pi) # normalisation factor for gaussian.\ndef gaussian_distribution(y, mu, sigma):\n # braodcast subtraction with mean and normalization to sigma\n result = (y.expand_as(mu) - mu) * torch.reciprocal(sigma)\n result = - 0.5 * (result * result)\n return (torch.exp(result) * torch.reciprocal(sigma)) * oneDivSqrtTwoPI", "_____no_output_____" ], [ "def mdn_loss_function(out_pi, out_sigma, out_mu, y):\n epsilon = 1e-3\n result = gaussian_distribution(y, out_mu, out_sigma) * out_pi\n result = torch.sum(result, dim=1)\n result = - torch.log(epsilon + result)\n return torch.mean(result)", "_____no_output_____" ] ], [ [ "Let's define our model, and use the Adam optimizer to train our model below:", "_____no_output_____" ] ], [ [ "model = MDN(hidden_size=NHIDDEN, num_mixtures=KMIX)", "_____no_output_____" ], [ "learning_rate = 0.00001\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)", "_____no_output_____" ], [ "for t in range(20000):\n (out_pi, out_sigma, out_mu) = model(x_train)\n loss = mdn_loss_function(out_pi, out_sigma, out_mu, y_train)\n if (t % 1000 == 0):\n print(t, loss.data[0])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()", "0 4.988687992095947\n1000 3.4866292476654053\n2000 3.1824162006378174\n3000 2.9246561527252197\n4000 2.7802634239196777\n5000 2.672682523727417\n6000 2.5783588886260986\n7000 2.5089898109436035\n8000 2.4450607299804688\n9000 2.398449420928955\n10000 2.3576488494873047\n11000 2.3166143894195557\n12000 2.276536464691162\n13000 2.239301919937134\n14000 2.1948606967926025\n15000 2.1471312046051025\n16000 2.0966522693634033\n17000 2.042475461959839\n18000 1.9856466054916382\n19000 1.9275684356689453\n" ] ], [ [ "We want to use our network to generate the parameters of the pdf for us to sample from. 
In the code below, we will sample $M=10$ values of $y$ for every $x$ input, and compare the sampled results with the training data.", "_____no_output_____" ] ], [ [ "x_test_data = np.float32(np.random.uniform(-15, 15, (1, NSAMPLE))).T\nx_test = Variable(torch.from_numpy(x_test_data.reshape(NSAMPLE, 1)))", "_____no_output_____" ], [ "(out_pi_test, out_sigma_test, out_mu_test) = model(x_test)", "_____no_output_____" ], [ "out_pi_test_data = out_pi_test.data.numpy()\nout_sigma_test_data = out_sigma_test.data.numpy()\nout_mu_test_data = out_mu_test.data.numpy()", "_____no_output_____" ], [ "def get_pi_idx(x, pdf):\n N = pdf.size\n accumulate = 0\n for i in range(0, N):\n accumulate += pdf[i]\n if (accumulate >= x):\n return i\n print('error with sampling ensemble')\n return -1\n\ndef generate_ensemble(M = 10):\n # for each point in X, generate M=10 ensembles\n NTEST = x_test_data.size\n result = np.random.rand(NTEST, M) # initially random [0, 1]\n rn = np.random.randn(NTEST, M) # normal random matrix (0.0, 1.0)\n mu = 0\n std = 0\n idx = 0\n\n # transforms result into random ensembles\n for j in range(0, M):\n for i in range(0, NTEST):\n idx = get_pi_idx(result[i, j], out_pi_test_data[i])\n mu = out_mu_test_data[i, idx]\n std = out_sigma_test_data[i, idx]\n result[i, j] = mu + rn[i, j]*std\n return result", "_____no_output_____" ], [ "y_test_data = generate_ensemble()", "_____no_output_____" ], [ "plt.figure(figsize=(8, 8))\nplt.plot(x_test_data,y_test_data,'b.', x_data,y_data,'r.',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "In the above graph, we plot out the generated data we sampled from the MDN distribution, in blue. We also plot the original training data in red over the predictions. Apart from a few outliers, the distributions seem to match the data. We can also plot a graph of $\\mu(x)$ as well to interpret what the neural net is actually doing:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8, 8))\nplt.plot(x_test_data,out_mu_test_data,'g.', x_data,y_data,'r.',alpha=0.3)\nplt.show()", "_____no_output_____" ] ], [ [ "In the plot above, we see that for every point on the $x$-axis, there are multiple lines or states where $y$ may be, and we select these states with probabilities modelled by $\\Pi$ .", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]